Dataset schema (⌀ marks columns that contain null values):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40 (fixed) |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 (fixed) |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 (fixed) ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 (fixed) ⌀ |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 (fixed) |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 (fixed) ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 (fixed) ⌀ |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 (fixed) |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 (fixed) ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 (fixed) ⌀ |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |

Each record below gives its metadata fields in this column order on one pipe-separated line, followed by the `content` source inline, the three line/fraction statistics, the `content_no_comment` source, and the two boolean flags.
1c4137fee0195ae2ff020efa86f7f3f471b3d7df | 994 | py | Python | imgur_url.py | NightKirie/NCKU_NLP_2108_industry3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | ["MIT"] | 1 | 2018-06-11T07:36:04.000Z | 2018-06-11T07:36:04.000Z | imgur_url.py | NightKirie/NCKU_NLP_2108_industry3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | ["MIT"] | null | null | null | imgur_url.py | NightKirie/NCKU_NLP_2108_industry3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | ["MIT"] | 4 | 2018-05-19T11:31:20.000Z | 2018-07-01T20:58:29.000Z |
#-*-coding:utf-8-*-
import tempfile, os
from imgurpython import ImgurClient
from config import client_id, client_secret, album_id, access_token, refresh_token, line_channel_access_token, line_channel_secret
static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')
def getUrl(img):
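    # Save the PIL image to a temp file under static/tmp, give it a .png
    # extension, upload it to the configured Imgur album, remove the local
    # copy, and return the hosted image's link.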
ext = 'png'
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf:
img.save(tf, "PNG")
img.close()
tempfile_path = tf.name
dist_path = tempfile_path + '.' + ext
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
client = ImgurClient(client_id, client_secret, access_token, refresh_token)
config = {
'album': album_id,
'name': '',
'title': '',
'description': ''
}
path = os.path.join('static', 'tmp', dist_name)
image = client.upload_from_path(path, config=config, anon=False)
os.remove(path)
print(path)
return image['link']
| avg_line_length: 33.133333 | max_line_length: 130 | alphanum_fraction: 0.671026 |
import tempfile, os
from imgurpython import ImgurClient
from config import client_id, client_secret, album_id, access_token, refresh_token, line_channel_access_token, line_channel_secret
static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')
def getUrl(img):
ext = 'png'
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf:
img.save(tf, "PNG")
img.close()
tempfile_path = tf.name
dist_path = tempfile_path + '.' + ext
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
client = ImgurClient(client_id, client_secret, access_token, refresh_token)
config = {
'album': album_id,
'name': '',
'title': '',
'description': ''
}
path = os.path.join('static', 'tmp', dist_name)
image = client.upload_from_path(path, config=config, anon=False)
os.remove(path)
print(path)
return image['link']
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
1c41387669282e9bf7492f8609e89d02101bd1cf | 9,604 | py | Python | tests/data/drivers/test_kql_driver.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | ["MIT"] | 2 | 2020-11-03T05:56:10.000Z | 2020-11-03T05:56:17.000Z | tests/data/drivers/test_kql_driver.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | ["MIT"] | null | null | null | tests/data/drivers/test_kql_driver.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | ["MIT"] | 1 | 2022-02-06T18:56:15.000Z | 2022-02-06T18:56:15.000Z |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""datq query test class."""
from contextlib import redirect_stdout
import io
from unittest.mock import patch
import pytest
import pytest_check as check
import pandas as pd
from adal.adal_error import AdalError
from Kqlmagic.kql_response import KqlError
from Kqlmagic.kql_engine import KqlEngineError
from Kqlmagic.my_aad_helper import AuthenticationError
from msticpy.common.exceptions import (
MsticpyKqlConnectionError,
MsticpyNotConnectedError,
MsticpyNoDataSourceError,
MsticpyDataQueryError,
)
from msticpy.data.data_providers import KqlDriver
from ...unit_test_lib import get_test_data_path
_TEST_DATA = get_test_data_path()
GET_IPYTHON_PATCH = KqlDriver.__module__ + ".get_ipython"
# pylint: disable=too-many-branches, too-many-return-statements
# pylint: disable=no-self-use
class KqlResultTest:
"""Test Kql result class."""
def __init__(self, code=0, partial=False, status="success"):
"""Create instance."""
self.completion_query_info = {"StatusCode": code, "StatusDescription": status}
self.is_partial_table = partial
def to_dataframe(self):
"""Convert dataframe."""
return pd.DataFrame()
class _MockIPython:
"""IPython get_ipython mock."""
def find_magic(self, magic):
"""Return None if magic isn't == kql."""
if magic == "kql":
return "Kqlmagic"
return None
def run_line_magic(self, magic, line):
"""Mock run line magic."""
return self._run_magic(magic, line)
def run_cell_magic(self, magic, line, cell):
"""Mock run cell magic."""
content = cell or line
return self._run_magic(magic, content)
@staticmethod # noqa: MC0001
def _run_magic(magic, content):
if magic == "reload_ext":
return None
if magic == "config":
if "=" in content:
return "dummy_setting"
return True
check.equal(magic, "kql")
if "KqlErrorUnk" in content:
resp = '{"error": {"code": "UnknownError"}}'
raise KqlError(http_response=resp, message=resp)
if "KqlErrorWS" in content:
resp = '{"error": {"code": "WorkspaceNotFoundError"}}'
raise KqlError(http_response=resp, message=resp)
if "KqlEngineError" in content:
raise KqlEngineError("Test Error")
if "AdalErrorUnk" in content:
resp = {"error_description": "unknown error"}
raise AdalError("Test Error", error_response=resp)
if "AdalErrorNR" in content:
raise AdalError("Test Error")
if "AdalErrorPoll" in content:
raise AdalError("Unexpected polling state code_expired")
if "AuthenticationError" in content:
raise AuthenticationError("Test Error")
if content == "--schema":
return {
"table1": {"field1": int, "field2": str},
"table2": {"field1": int, "field2": str},
}
if "query_partial" in content:
return KqlResultTest(code=0, partial=True, status="partial")
if "query_failed" in content:
return KqlResultTest(code=1, partial=False, status="failed")
return KqlResultTest(code=0, partial=False, status="success")
@patch(GET_IPYTHON_PATCH)
def test_kql_load(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
check.is_true(kql_driver.loaded)
kql_driver = KqlDriver(connection_str="la://connection")
check.is_true(kql_driver.loaded)
check.is_true(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
check.is_true(kql_driver.loaded)
kql_driver.connect(connection_str="la://connection")
check.is_true(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_no_cs(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
check.is_true(kql_driver.loaded)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect()
check.is_in("no connection string", mp_ex.value.args)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_kql_exceptions(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+KqlErrorUnk")
check.is_in("Kql response error", mp_ex.value.args)
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(
connection_str="la://connection.workspace('1234').tenant(KqlErrorWS)"
)
check.is_in("unknown workspace", mp_ex.value.args)
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(
connection_str="la://connection.workspace('1234').tenant(KqlEngineError)"
)
check.is_in("kql connection error", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_adal_exceptions(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AdalErrorUnk")
check.is_in("could not authenticate to tenant", mp_ex.value.args)
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AdalErrorNR")
check.is_in("could not authenticate to tenant", mp_ex.value.args)
check.is_in("Full error", str(mp_ex.value.args))
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AdalErrorPoll")
check.is_in("authentication timed out", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_authn_exceptions(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AuthenticationError")
check.is_in("authentication failed", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_schema(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
check.is_in("table1", kql_driver.schema)
check.is_in("table2", kql_driver.schema)
check.is_in("field1", kql_driver.schema["table1"])
@patch(GET_IPYTHON_PATCH)
def test_kql_query_not_connected(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyNotConnectedError) as mp_ex:
kql_driver.query("test")
check.is_in("not connected to a workspace.", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_query_failed(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
with pytest.raises(MsticpyDataQueryError) as mp_ex:
kql_driver.query("test query_failed")
arg_str = "\n".join([str(arg) for arg in mp_ex.value.args])
check.is_in("Query:", arg_str)
check.is_in("test query_failed", arg_str)
check.is_in("Query failed", arg_str)
check.is_in(
"https://msticpy.readthedocs.io/en/latest/DataAcquisition.html", arg_str
)
@patch(GET_IPYTHON_PATCH)
def test_kql_query_success(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
result_df = kql_driver.query("test query")
check.is_instance(result_df, pd.DataFrame)
@patch(GET_IPYTHON_PATCH)
def test_kql_query_partial(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
output = io.StringIO()
with redirect_stdout(output):
result_df = kql_driver.query("test query_partial")
check.is_instance(result_df, pd.DataFrame)
check.is_in("Warning - query returned partial", output.getvalue())
@patch(GET_IPYTHON_PATCH)
def test_kql_query_no_table(get_ipython):
"""Check loaded true."""
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
with pytest.raises(MsticpyNoDataSourceError) as mp_ex:
query_source = {"args.table": "table3"}
kql_driver.query("test query", query_source=query_source)
check.is_in("table3 not found.", mp_ex.value.args)
| avg_line_length: 33.463415 | max_line_length: 86 | alphanum_fraction: 0.688567 |
from contextlib import redirect_stdout
import io
from unittest.mock import patch
import pytest
import pytest_check as check
import pandas as pd
from adal.adal_error import AdalError
from Kqlmagic.kql_response import KqlError
from Kqlmagic.kql_engine import KqlEngineError
from Kqlmagic.my_aad_helper import AuthenticationError
from msticpy.common.exceptions import (
MsticpyKqlConnectionError,
MsticpyNotConnectedError,
MsticpyNoDataSourceError,
MsticpyDataQueryError,
)
from msticpy.data.data_providers import KqlDriver
from ...unit_test_lib import get_test_data_path
_TEST_DATA = get_test_data_path()
GET_IPYTHON_PATCH = KqlDriver.__module__ + ".get_ipython"
class KqlResultTest:
def __init__(self, code=0, partial=False, status="success"):
self.completion_query_info = {"StatusCode": code, "StatusDescription": status}
self.is_partial_table = partial
def to_dataframe(self):
return pd.DataFrame()
class _MockIPython:
def find_magic(self, magic):
if magic == "kql":
return "Kqlmagic"
return None
def run_line_magic(self, magic, line):
return self._run_magic(magic, line)
def run_cell_magic(self, magic, line, cell):
content = cell or line
return self._run_magic(magic, content)
@staticmethod
def _run_magic(magic, content):
if magic == "reload_ext":
return None
if magic == "config":
if "=" in content:
return "dummy_setting"
return True
check.equal(magic, "kql")
if "KqlErrorUnk" in content:
resp = '{"error": {"code": "UnknownError"}}'
raise KqlError(http_response=resp, message=resp)
if "KqlErrorWS" in content:
resp = '{"error": {"code": "WorkspaceNotFoundError"}}'
raise KqlError(http_response=resp, message=resp)
if "KqlEngineError" in content:
raise KqlEngineError("Test Error")
if "AdalErrorUnk" in content:
resp = {"error_description": "unknown error"}
raise AdalError("Test Error", error_response=resp)
if "AdalErrorNR" in content:
raise AdalError("Test Error")
if "AdalErrorPoll" in content:
raise AdalError("Unexpected polling state code_expired")
if "AuthenticationError" in content:
raise AuthenticationError("Test Error")
if content == "--schema":
return {
"table1": {"field1": int, "field2": str},
"table2": {"field1": int, "field2": str},
}
if "query_partial" in content:
return KqlResultTest(code=0, partial=True, status="partial")
if "query_failed" in content:
return KqlResultTest(code=1, partial=False, status="failed")
return KqlResultTest(code=0, partial=False, status="success")
@patch(GET_IPYTHON_PATCH)
def test_kql_load(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
check.is_true(kql_driver.loaded)
kql_driver = KqlDriver(connection_str="la://connection")
check.is_true(kql_driver.loaded)
check.is_true(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
check.is_true(kql_driver.loaded)
kql_driver.connect(connection_str="la://connection")
check.is_true(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_no_cs(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
check.is_true(kql_driver.loaded)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect()
check.is_in("no connection string", mp_ex.value.args)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_kql_exceptions(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+KqlErrorUnk")
check.is_in("Kql response error", mp_ex.value.args)
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(
connection_str="la://connection.workspace('1234').tenant(KqlErrorWS)"
)
check.is_in("unknown workspace", mp_ex.value.args)
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(
connection_str="la://connection.workspace('1234').tenant(KqlEngineError)"
)
check.is_in("kql connection error", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_adal_exceptions(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AdalErrorUnk")
check.is_in("could not authenticate to tenant", mp_ex.value.args)
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AdalErrorNR")
check.is_in("could not authenticate to tenant", mp_ex.value.args)
check.is_in("Full error", str(mp_ex.value.args))
check.is_false(kql_driver.connected)
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AdalErrorPoll")
check.is_in("authentication timed out", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_connect_authn_exceptions(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
kql_driver.connect(connection_str="la://connection+AuthenticationError")
check.is_in("authentication failed", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_schema(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
check.is_in("table1", kql_driver.schema)
check.is_in("table2", kql_driver.schema)
check.is_in("field1", kql_driver.schema["table1"])
@patch(GET_IPYTHON_PATCH)
def test_kql_query_not_connected(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
with pytest.raises(MsticpyNotConnectedError) as mp_ex:
kql_driver.query("test")
check.is_in("not connected to a workspace.", mp_ex.value.args)
check.is_false(kql_driver.connected)
@patch(GET_IPYTHON_PATCH)
def test_kql_query_failed(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
with pytest.raises(MsticpyDataQueryError) as mp_ex:
kql_driver.query("test query_failed")
arg_str = "\n".join([str(arg) for arg in mp_ex.value.args])
check.is_in("Query:", arg_str)
check.is_in("test query_failed", arg_str)
check.is_in("Query failed", arg_str)
check.is_in(
"https://msticpy.readthedocs.io/en/latest/DataAcquisition.html", arg_str
)
@patch(GET_IPYTHON_PATCH)
def test_kql_query_success(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
result_df = kql_driver.query("test query")
check.is_instance(result_df, pd.DataFrame)
@patch(GET_IPYTHON_PATCH)
def test_kql_query_partial(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
output = io.StringIO()
with redirect_stdout(output):
result_df = kql_driver.query("test query_partial")
check.is_instance(result_df, pd.DataFrame)
check.is_in("Warning - query returned partial", output.getvalue())
@patch(GET_IPYTHON_PATCH)
def test_kql_query_no_table(get_ipython):
get_ipython.return_value = _MockIPython()
kql_driver = KqlDriver()
kql_driver.connect(connection_str="la://connection")
with pytest.raises(MsticpyNoDataSourceError) as mp_ex:
query_source = {"args.table": "table3"}
kql_driver.query("test query", query_source=query_source)
check.is_in("table3 not found.", mp_ex.value.args)
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
1c4138a2985a9db379876cd6fdbf4250c6a503c9 | 8,799 | py | Python | f5lbaasdriver/v2/bigip/test/test_service_builder.py | Sinan828/mitaka_driver | 26a4cb147a1d531d487738a818493975ce808b4e | ["Apache-2.0"] | null | null | null | f5lbaasdriver/v2/bigip/test/test_service_builder.py | Sinan828/mitaka_driver | 26a4cb147a1d531d487738a818493975ce808b4e | ["Apache-2.0"] | null | null | null | f5lbaasdriver/v2/bigip/test/test_service_builder.py | Sinan828/mitaka_driver | 26a4cb147a1d531d487738a818493975ce808b4e | ["Apache-2.0"] | null | null | null |
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
from uuid import uuid4
from f5lbaasdriver.v2.bigip import exceptions as f5_exc
from f5lbaasdriver.v2.bigip.service_builder import LBaaSv2ServiceBuilder
class FakeDict(dict):
"""Can be used as Neutron model object or as service builder dict"""
def __init__(self, *args, **kwargs):
super(FakeDict, self).__init__(*args, **kwargs)
if 'id' not in kwargs:
self['id'] = _uuid()
def __getattr__(self, item):
"""Needed for using as a model object"""
if item in self:
return self[item]
else:
return None
def to_api_dict(self):
return self
def to_dict(self, **kwargs):
return self
def _uuid():
"""Create a random UUID string for model object IDs"""
return str(uuid4())
@pytest.fixture
def listeners():
return [FakeDict(default_pool=FakeDict(),
l7_policies=[]),
FakeDict(default_pool=FakeDict(),
l7_policies=[])]
@pytest.fixture
def l7policies():
policies = []
ids = [_uuid(), _uuid()]
for i, id in enumerate(ids):
policy = FakeDict(listener_id=id,
listeners=[FakeDict(id=id)])
assert policy.listener_id == policy.listeners[0].id == id
policies.append(policy)
return policies
@pytest.fixture
def two_listener_l7policies():
return [FakeDict(listeners=[FakeDict(), FakeDict()])]
@pytest.fixture
def l7rules():
return [FakeDict(policies=[FakeDict()]),
FakeDict(policies=[FakeDict()])]
@pytest.fixture
def two_policy_l7rules():
return [FakeDict(policies=[FakeDict(), FakeDict()])]
@pytest.fixture
def loadbalancer():
return FakeDict()
@pytest.fixture
def monitors():
return [FakeDict(),
FakeDict()]
@pytest.fixture
def pools(monitors):
pools = []
for monitor in monitors:
pool = FakeDict(healthmonitor_id=monitor['id'])
monitor['pool_id'] = pool['id']
pools.append(pool)
return pools
@pytest.fixture
def members():
return [FakeDict(subnet_id=_uuid())]
def subnet():
return FakeDict(network_id=_uuid())
def test_get_l7policies(listeners, l7policies):
"""Test that get_l7policies returns valid list of dict"""
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policies = \
mock.MagicMock(return_value=l7policies)
policies = service_builder._get_l7policies(context, listeners)
assert len(policies) > 0
assert policies[0] is l7policies[0]
def test_get_l7policies_filter(listeners):
"""Test that get_l7policies() is called with filter of listener IDs"""
context = mock.MagicMock()
driver = mock.MagicMock()
# construct an equivalent filter to what service_builder should use
filters = {'listener_id': [l['id'] for l in listeners]}
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder._get_l7policies(context, listeners)
# assert that the expected filter was used
service_builder.driver.plugin.db.get_l7policies.assert_called_with(
context, filters=filters)
def test_get_l7policies_no_listeners():
"""Test that an empty listener list input returns an empty policy list."""
context = mock.MagicMock()
driver = mock.MagicMock()
listeners = []
service_builder = LBaaSv2ServiceBuilder(driver)
l7policies = service_builder._get_l7policies(context, listeners)
assert not l7policies
def test_get_l7policy_rules(l7policies, l7rules):
"""Test that get_l7policies returns valid list of dict"""
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policy_rules = mock.MagicMock(
return_value=l7rules)
rules = service_builder._get_l7policy_rules(context, l7policies)
assert len(rules) > 0
assert rules[0] is l7rules[0].to_api_dict()
def test_get_l7policy_rules_filter(l7policies):
"""Test that get_l7policy_rules() is called with filter of l7policy IDs"""
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder._get_l7policy_rules(context, l7policies)
assert service_builder.driver.plugin.db.get_l7policy_rules.call_args_list \
== [mock.call(context, l7policies[0]['id']),
mock.call(context, l7policies[1]['id'])]
def test_get_l7policy_rules_no_policies():
"""Test that an empty policies input list returns an empty rule list."""
context = mock.MagicMock()
driver = mock.MagicMock()
l7policies = []
service_builder = LBaaSv2ServiceBuilder(driver)
rules = service_builder._get_l7policy_rules(context, l7policies)
assert not rules
def test_get_l7policies_more_than_one_listener_error(
listeners, two_listener_l7policies):
"""Exception is raised when > 1 listener for a policy."""
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policies = mock.MagicMock(
return_value=two_listener_l7policies)
with pytest.raises(f5_exc.PolicyHasMoreThanOneListener) as ex:
service_builder._get_l7policies(context, listeners)
assert 'A policy should have only one listener, but found 2 for policy ' +\
two_listener_l7policies[0].id in ex.value.message
def test_get_l7policy_rules_more_than_one_policy(
l7policies, two_policy_l7rules):
"""Exception is raised when > 1 policy for a rule."""
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policy_rules = mock.MagicMock(
return_value=two_policy_l7rules)
with pytest.raises(f5_exc.RuleHasMoreThanOnePolicy) as ex:
service_builder._get_l7policy_rules(context, l7policies)
assert 'A rule should have only one policy, but found 2 for rule ' + \
two_policy_l7rules[0].id in ex.value.message
def test_get_listeners(loadbalancer, listeners):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_listeners = mock.MagicMock(
return_value=listeners)
test_listeners = service_builder._get_listeners(context, loadbalancer)
assert len(test_listeners) == len(listeners)
assert test_listeners[0] == listeners[0].to_api_dict()
assert test_listeners[1] == listeners[1].to_api_dict()
def test_get_pools(loadbalancer, pools, monitors):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_pools_and_healthmonitors = \
mock.MagicMock(return_value=(pools, monitors))
test_pools, test_monitors = \
service_builder._get_pools_and_healthmonitors(
context, loadbalancer)
for pool, test_pool, monitor in zip(pools, test_pools, monitors):
assert test_pool is pool
assert test_pool['healthmonitor_id'] == monitor['id']
def test_get_members(pools, members):
context = mock.MagicMock()
driver = mock.MagicMock()
subnet_map = mock.MagicMock()
network_map = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db._get_members = \
mock.MagicMock(return_value=members)
test_members = service_builder._get_members(context, pools,
subnet_map, network_map)
for test_member, member in zip(test_members, members):
assert test_member is member
def test__pool_to_dict():
'''Ensure function does not add listeners or listener_id to pool dict.'''
driver = mock.MagicMock()
fake_pool = FakeDict()
fake_pool.members = []
fake_pool.l7_policies = []
sb = LBaaSv2ServiceBuilder(driver)
pool_dict = sb._pool_to_dict(fake_pool)
assert 'listener_id' not in pool_dict
assert 'listeners' not in pool_dict
| avg_line_length: 30.341379 | max_line_length: 79 | alphanum_fraction: 0.703716 |
import mock
import pytest
from uuid import uuid4
from f5lbaasdriver.v2.bigip import exceptions as f5_exc
from f5lbaasdriver.v2.bigip.service_builder import LBaaSv2ServiceBuilder
class FakeDict(dict):
def __init__(self, *args, **kwargs):
super(FakeDict, self).__init__(*args, **kwargs)
if 'id' not in kwargs:
self['id'] = _uuid()
def __getattr__(self, item):
if item in self:
return self[item]
else:
return None
def to_api_dict(self):
return self
def to_dict(self, **kwargs):
return self
def _uuid():
return str(uuid4())
@pytest.fixture
def listeners():
return [FakeDict(default_pool=FakeDict(),
l7_policies=[]),
FakeDict(default_pool=FakeDict(),
l7_policies=[])]
@pytest.fixture
def l7policies():
policies = []
ids = [_uuid(), _uuid()]
for i, id in enumerate(ids):
policy = FakeDict(listener_id=id,
listeners=[FakeDict(id=id)])
assert policy.listener_id == policy.listeners[0].id == id
policies.append(policy)
return policies
@pytest.fixture
def two_listener_l7policies():
return [FakeDict(listeners=[FakeDict(), FakeDict()])]
@pytest.fixture
def l7rules():
return [FakeDict(policies=[FakeDict()]),
FakeDict(policies=[FakeDict()])]
@pytest.fixture
def two_policy_l7rules():
return [FakeDict(policies=[FakeDict(), FakeDict()])]
@pytest.fixture
def loadbalancer():
return FakeDict()
@pytest.fixture
def monitors():
return [FakeDict(),
FakeDict()]
@pytest.fixture
def pools(monitors):
pools = []
for monitor in monitors:
pool = FakeDict(healthmonitor_id=monitor['id'])
monitor['pool_id'] = pool['id']
pools.append(pool)
return pools
@pytest.fixture
def members():
return [FakeDict(subnet_id=_uuid())]
def subnet():
return FakeDict(network_id=_uuid())
def test_get_l7policies(listeners, l7policies):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policies = \
mock.MagicMock(return_value=l7policies)
policies = service_builder._get_l7policies(context, listeners)
assert len(policies) > 0
assert policies[0] is l7policies[0]
def test_get_l7policies_filter(listeners):
context = mock.MagicMock()
driver = mock.MagicMock()
filters = {'listener_id': [l['id'] for l in listeners]}
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder._get_l7policies(context, listeners)
service_builder.driver.plugin.db.get_l7policies.assert_called_with(
context, filters=filters)
def test_get_l7policies_no_listeners():
context = mock.MagicMock()
driver = mock.MagicMock()
listeners = []
service_builder = LBaaSv2ServiceBuilder(driver)
l7policies = service_builder._get_l7policies(context, listeners)
assert not l7policies
def test_get_l7policy_rules(l7policies, l7rules):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policy_rules = mock.MagicMock(
return_value=l7rules)
rules = service_builder._get_l7policy_rules(context, l7policies)
assert len(rules) > 0
assert rules[0] is l7rules[0].to_api_dict()
def test_get_l7policy_rules_filter(l7policies):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder._get_l7policy_rules(context, l7policies)
assert service_builder.driver.plugin.db.get_l7policy_rules.call_args_list \
== [mock.call(context, l7policies[0]['id']),
mock.call(context, l7policies[1]['id'])]
def test_get_l7policy_rules_no_policies():
context = mock.MagicMock()
driver = mock.MagicMock()
l7policies = []
service_builder = LBaaSv2ServiceBuilder(driver)
rules = service_builder._get_l7policy_rules(context, l7policies)
assert not rules
def test_get_l7policies_more_than_one_listener_error(
listeners, two_listener_l7policies):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policies = mock.MagicMock(
return_value=two_listener_l7policies)
with pytest.raises(f5_exc.PolicyHasMoreThanOneListener) as ex:
service_builder._get_l7policies(context, listeners)
assert 'A policy should have only one listener, but found 2 for policy ' +\
two_listener_l7policies[0].id in ex.value.message
def test_get_l7policy_rules_more_than_one_policy(
l7policies, two_policy_l7rules):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_l7policy_rules = mock.MagicMock(
return_value=two_policy_l7rules)
with pytest.raises(f5_exc.RuleHasMoreThanOnePolicy) as ex:
service_builder._get_l7policy_rules(context, l7policies)
assert 'A rule should have only one policy, but found 2 for rule ' + \
two_policy_l7rules[0].id in ex.value.message
def test_get_listeners(loadbalancer, listeners):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_listeners = mock.MagicMock(
return_value=listeners)
test_listeners = service_builder._get_listeners(context, loadbalancer)
assert len(test_listeners) == len(listeners)
assert test_listeners[0] == listeners[0].to_api_dict()
assert test_listeners[1] == listeners[1].to_api_dict()
def test_get_pools(loadbalancer, pools, monitors):
context = mock.MagicMock()
driver = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db.get_pools_and_healthmonitors = \
mock.MagicMock(return_value=(pools, monitors))
test_pools, test_monitors = \
service_builder._get_pools_and_healthmonitors(
context, loadbalancer)
for pool, test_pool, monitor in zip(pools, test_pools, monitors):
assert test_pool is pool
assert test_pool['healthmonitor_id'] == monitor['id']
def test_get_members(pools, members):
context = mock.MagicMock()
driver = mock.MagicMock()
subnet_map = mock.MagicMock()
network_map = mock.MagicMock()
service_builder = LBaaSv2ServiceBuilder(driver)
service_builder.driver.plugin.db._get_members = \
mock.MagicMock(return_value=members)
test_members = service_builder._get_members(context, pools,
subnet_map, network_map)
for test_member, member in zip(test_members, members):
assert test_member is member
def test__pool_to_dict():
driver = mock.MagicMock()
fake_pool = FakeDict()
fake_pool.members = []
fake_pool.l7_policies = []
sb = LBaaSv2ServiceBuilder(driver)
pool_dict = sb._pool_to_dict(fake_pool)
assert 'listener_id' not in pool_dict
assert 'listeners' not in pool_dict
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
1c4138a6563b0fd83e71c47c404940d1d2ad7586 | 1,495 | py | Python | src/classifier/bidirectional_lstm.py | Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering | 095b47eb76503d44f54f701d303193328a5a4c86 | ["MIT"] | null | null | null | src/classifier/bidirectional_lstm.py | Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering | 095b47eb76503d44f54f701d303193328a5a4c86 | ["MIT"] | 6 | 2020-01-28T23:09:44.000Z | 2022-02-10T01:16:59.000Z | src/classifier/bidirectional_lstm.py | Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering | 095b47eb76503d44f54f701d303193328a5a4c86 | ["MIT"] | null | null | null |
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding, LSTM, Bidirectional
from src.classifier.model import Model
from src.support import support
class BidirectionalLstm(Model):
def __init__(self, phrase_manager, verbose = False):
super().__init__(phrase_manager)
self.name = "Bidirectional LSTM"
self.batch_size = phrase_manager.configuration[support.BLSTM_BATCH_SIZE]
self.epochs = phrase_manager.configuration[support.BLSTM_EPOCHS]
# model"s params
word_max_length = phrase_manager.configuration[support.WORD_MAX_LENGTH]
quantity_classes = phrase_manager.configuration[support.QUANTITY_CLASSES]
loss = phrase_manager.configuration[support.LOSS]
activation_last_layer = phrase_manager.configuration[support.ACTIVATION_LAST_LAYER]
embedding_dimensions = phrase_manager.configuration[support.BLSTM_EMBEDDING_DIMENSION]
quantity_words = phrase_manager.configuration[support.QUANTITY_WORDS]
support.colored_print("Building Bidirectional LSTM model...", "green", verbose)
self.model = Sequential()
self.model.add(Embedding(quantity_words, embedding_dimensions, input_length=word_max_length))
self.model.add(Bidirectional(LSTM(64)))
self.model.add(Dropout(0.5))
self.model.add(Dense(quantity_classes, activation=activation_last_layer))
self.model.compile("adam", loss, metrics=["accuracy"])
| avg_line_length: 51.551724 | max_line_length: 101 | alphanum_fraction: 0.755853 |
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding, LSTM, Bidirectional
from src.classifier.model import Model
from src.support import support
class BidirectionalLstm(Model):
def __init__(self, phrase_manager, verbose = False):
super().__init__(phrase_manager)
self.name = "Bidirectional LSTM"
self.batch_size = phrase_manager.configuration[support.BLSTM_BATCH_SIZE]
self.epochs = phrase_manager.configuration[support.BLSTM_EPOCHS]
word_max_length = phrase_manager.configuration[support.WORD_MAX_LENGTH]
quantity_classes = phrase_manager.configuration[support.QUANTITY_CLASSES]
loss = phrase_manager.configuration[support.LOSS]
activation_last_layer = phrase_manager.configuration[support.ACTIVATION_LAST_LAYER]
embedding_dimensions = phrase_manager.configuration[support.BLSTM_EMBEDDING_DIMENSION]
quantity_words = phrase_manager.configuration[support.QUANTITY_WORDS]
support.colored_print("Building Bidirectional LSTM model...", "green", verbose)
self.model = Sequential()
self.model.add(Embedding(quantity_words, embedding_dimensions, input_length=word_max_length))
self.model.add(Bidirectional(LSTM(64)))
self.model.add(Dropout(0.5))
self.model.add(Dense(quantity_classes, activation=activation_last_layer))
self.model.compile("adam", loss, metrics=["accuracy"])
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
1c413957796520e1fe6868f379778a6905f36ca1 | 1,831 | py | Python | loss/IQA/steer_pyr_space.py | milesgray/CALAE | a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac | ["Apache-2.0"] | null | null | null | loss/IQA/steer_pyr_space.py | milesgray/CALAE | a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac | ["Apache-2.0"] | null | null | null | loss/IQA/steer_pyr_space.py | milesgray/CALAE | a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac | ["Apache-2.0"] | null | null | null |
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from .utils import fspecial_gauss
from .steer_pyr_utils import sp5_filters
def corrDn(image, filt, step=1, channels=1):
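    # Correlate `image` with the numpy filter `filt` and downsample: the filter
    # becomes a per-channel (depthwise) conv kernel, the image is reflect-padded
    # so spatial size is preserved, and `step` is the subsampling stride.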
filt_ = torch.from_numpy(filt).float().unsqueeze(0).unsqueeze(0).repeat(channels,1,1,1).to(image.device)
p = (filt_.shape[2]-1)//2
image = F.pad(image, (p,p,p,p),'reflect')
img = F.conv2d(image, filt_, stride=step, padding=0, groups = channels)
return img
def SteerablePyramidSpace(image, height=4, order=5, channels=1):
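    # Spatial-domain steerable pyramid: a highpass residual first, then
    # `order` + 1 oriented bands at each of `height` scales (downsampling by 2
    # between scales via corrDn), and the lowpass residual last.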
num_orientations = order + 1
filters = sp5_filters()
hi0 = corrDn(image, filters['hi0filt'], step=1, channels=channels)
pyr_coeffs = []
pyr_coeffs.append(hi0)
lo = corrDn(image, filters['lo0filt'], step=1, channels=channels)
for _ in range(height):
bfiltsz = int(np.floor(np.sqrt(filters['bfilts'].shape[0])))
for b in range(num_orientations):
filt = filters['bfilts'][:, b].reshape(bfiltsz, bfiltsz).T
band = corrDn(lo, filt, step=1, channels=channels)
pyr_coeffs.append(band)
lo = corrDn(lo, filters['lofilt'], step=2, channels=channels)
pyr_coeffs.append(lo)
return pyr_coeffs
if __name__ == '__main__':
from PIL import Image
import argparse
from utils import prepare_image
parser = argparse.ArgumentParser()
parser.add_argument('--ref', type=str, default='images/r0.png')
parser.add_argument('--dist', type=str, default='images/r1.png')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dist = prepare_image(Image.open(args.dist).convert("L"),repeatNum=1).to(device)
x = SteerablePyramidSpace(dist*255,channels=1)
c = 0
| avg_line_length: 33.907407 | max_line_length: 108 | alphanum_fraction: 0.678864 |
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from .utils import fspecial_gauss
from .steer_pyr_utils import sp5_filters
def corrDn(image, filt, step=1, channels=1):
filt_ = torch.from_numpy(filt).float().unsqueeze(0).unsqueeze(0).repeat(channels,1,1,1).to(image.device)
p = (filt_.shape[2]-1)//2
image = F.pad(image, (p,p,p,p),'reflect')
img = F.conv2d(image, filt_, stride=step, padding=0, groups = channels)
return img
def SteerablePyramidSpace(image, height=4, order=5, channels=1):
num_orientations = order + 1
filters = sp5_filters()
hi0 = corrDn(image, filters['hi0filt'], step=1, channels=channels)
pyr_coeffs = []
pyr_coeffs.append(hi0)
lo = corrDn(image, filters['lo0filt'], step=1, channels=channels)
for _ in range(height):
bfiltsz = int(np.floor(np.sqrt(filters['bfilts'].shape[0])))
for b in range(num_orientations):
filt = filters['bfilts'][:, b].reshape(bfiltsz, bfiltsz).T
band = corrDn(lo, filt, step=1, channels=channels)
pyr_coeffs.append(band)
lo = corrDn(lo, filters['lofilt'], step=2, channels=channels)
pyr_coeffs.append(lo)
return pyr_coeffs
if __name__ == '__main__':
from PIL import Image
import argparse
from utils import prepare_image
parser = argparse.ArgumentParser()
parser.add_argument('--ref', type=str, default='images/r0.png')
parser.add_argument('--dist', type=str, default='images/r1.png')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dist = prepare_image(Image.open(args.dist).convert("L"),repeatNum=1).to(device)
x = SteerablePyramidSpace(dist*255,channels=1)
c = 0
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
1c413965d0e055662eb365a9aa7fc2277dee946f | 651 | py | Python | darting/urls.py | nimiq/dartexa | 3b173de6427fa4ab41e91530db300f75a85e8d25 | ["Apache-2.0"] | null | null | null | darting/urls.py | nimiq/dartexa | 3b173de6427fa4ab41e91530db300f75a85e8d25 | ["Apache-2.0"] | null | null | null | darting/urls.py | nimiq/dartexa | 3b173de6427fa4ab41e91530db300f75a85e8d25 | ["Apache-2.0"] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^test$', views.TestViewSet.as_view(), name='test'),
url(r'^game/cancel$', views.GameCancelViewSet.as_view(), name='game-cancel'),
url(r'^game$', views.GameViewSet.as_view(), name='game'),
url(r'^dart/cancel$', views.DartCancelViewSet.as_view(), name='dart-cancel'),
url(r'^dart$', views.DartViewSet.as_view(), name='dart'),
url(r'^status$', views.StatusViewSet.as_view(), name='status'),
url(r'^ui/paolo$', views.ui_paolo, name='ui-paolo'),
url(r'^ui/rodrigo$', views.ui_rodrigo, name='ui-rodrigo'),
url(r'^ui$', views.ui, name='ui'),
]
| avg_line_length: 38.294118 | max_line_length: 81 | alphanum_fraction: 0.651306 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^test$', views.TestViewSet.as_view(), name='test'),
url(r'^game/cancel$', views.GameCancelViewSet.as_view(), name='game-cancel'),
url(r'^game$', views.GameViewSet.as_view(), name='game'),
url(r'^dart/cancel$', views.DartCancelViewSet.as_view(), name='dart-cancel'),
url(r'^dart$', views.DartViewSet.as_view(), name='dart'),
url(r'^status$', views.StatusViewSet.as_view(), name='status'),
url(r'^ui/paolo$', views.ui_paolo, name='ui-paolo'),
url(r'^ui/rodrigo$', views.ui_rodrigo, name='ui-rodrigo'),
url(r'^ui$', views.ui, name='ui'),
]
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
1c413980b6420f9a09e74d3e4eadd2ce8263cf19 | 6,614 | py | Python | cybo/models/stack_propagation_slu.py | bo-ke/cybo | 612f30b0466b4ed6d04f5c2128b133367b55e576 | ["MIT"] | 2 | 2020-12-23T15:58:13.000Z | 2022-02-28T06:55:24.000Z | cybo/models/stack_propagation_slu.py | bo-ke/cybo | 612f30b0466b4ed6d04f5c2128b133367b55e576 | ["MIT"] | 7 | 2021-03-15T11:57:29.000Z | 2021-05-14T03:31:12.000Z | cybo/models/stack_propagation_slu.py | bo-ke/cybo | 612f30b0466b4ed6d04f5c2128b133367b55e576 | ["MIT"] | 1 | 2022-03-01T17:47:22.000Z | 2022-03-01T17:47:22.000Z |
# -*- coding: utf-8 -*-
'''
@author: kebo
@contact: kebo0912@outlook.com
@version: 1.0
@file: stack_propagation_slu.py
@time: 2021/02/23 01:01:13
Notes and explanations about this file start from this line.
'''
import tensorflow as tf
from typing import Dict
from cybo.data.vocabulary import Vocabulary
from cybo.modules.attentions import SelfAttentionLayer
from cybo.losses.slu_loss import slu_loss_func
# from cybo.metrics.slu_overall_acc_metric import SluTokenLevelIntentOverallAcc
from cybo.metrics.nlu_acc_metric import NluAccMetric, Metric
from cybo.losses.token_classification_loss import TokenClassificationLoss
from cybo.models.model import Model
class StackPropagationSlu(Model):
def __init__(
self, embedding_dim, hidden_dim, dropout_rate,
vocab: Vocabulary, *args, **kwargs):
super().__init__(vocab=vocab, *args, **kwargs)
_vocab_size = self._vocab.get_vocab_size("text")
_intent_size = self._vocab.get_vocab_size("intent")
_slot_size = self._vocab.get_vocab_size("tags")
self.embedding = tf.keras.layers.Embedding(
_vocab_size, embedding_dim, mask_zero=True)
self.bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(hidden_dim, return_sequences=True))
self.attention_layer = SelfAttentionLayer(
hidden_dim=1024, output_dim=128,
dropout_rate=dropout_rate)
self.dropout1 = tf.keras.layers.Dropout(rate=dropout_rate)
self.concat = tf.keras.layers.Concatenate()
self.intent_decoder_cell = tf.keras.layers.LSTMCell(units=64)
self.slot_decoder_cell = tf.keras.layers.LSTMCell(units=64)
self.intent_decoder_dropout = tf.keras.layers.Dropout(
rate=dropout_rate)
self.slot_decoder_dropout = tf.keras.layers.Dropout(rate=dropout_rate)
self.intent_liner_layer = tf.keras.layers.Dense(
units=_intent_size)
self.slot_liner_layer = tf.keras.layers.Dense(
units=_slot_size)
self.intent_embedding = tf.keras.layers.Embedding(_intent_size, 8)
self.slot_embedding = tf.keras.layers.Embedding(_slot_size, 32)
self._intent_loss = TokenClassificationLoss()
self._slot_loss = TokenClassificationLoss()
def init_metrics(self) -> Dict[str, Metric]:
return {"nlu_acc": NluAccMetric()}
# @tf.function()
def call(self, input_ids, intent_ids=None, tags_ids=None, mask=None,
training=True):
x = self.embedding(input_ids) # (b, s, e)
x = self.dropout1(x, training=training)
h = self.bi_lstm(x) # (b, s, 2e)
c = self.attention_layer(h) # (b, s, 2e)
e = self.concat([h, c])
# intent_decoder
_intent_h_state, _intent_c_state = tf.zeros(
[x.shape[0], 64]), tf.zeros([x.shape[0], 64])
# (b, 64)
_slot_h_state, _slot_c_state = tf.zeros(
[x.shape[0], 64]), tf.zeros([x.shape[0], 64])
# (b, 64)
# https://stackoverflow.com/questions/64567161/tensorflow-cannot-be-accessed-here-it-is-defined-in-another-function-or-code-b
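        # If run under tf.function (see the commented-out decorator above),
        # tensors appended to a plain Python list inside a tf.range loop can't
        # be carried across iterations, hence the TensorArray accumulators.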
y_intent, y_slot = tf.TensorArray(
dtype=tf.float32, size=0, dynamic_size=True), tf.TensorArray(
dtype=tf.float32, size=0, dynamic_size=True)
# y_intent, y_slot = [], []
prev_intent_tensor = tf.zeros([x.shape[0], 8])
prev_slot_tensor = tf.zeros([x.shape[0], 32])
for i in tf.range(x.shape[1]):
_hidden = e[:, i, :]
_intent_hidden = tf.concat([_hidden, prev_intent_tensor], axis=-1)
# apply dropout
_intent_hidden = self.intent_decoder_dropout(
_intent_hidden, training=training)
_intent_h_state, (_intent_h_state, _intent_c_state) = self.intent_decoder_cell(
_intent_hidden, states=[_intent_h_state, _intent_c_state])
_h_intent_i = self.intent_liner_layer(_intent_h_state)
y_intent = y_intent.write(i, _h_intent_i)
# y_intent.append(_h_intent_i)
prev_intent_tensor = self.intent_embedding(
tf.argmax(_h_intent_i, axis=-1))
# slot_decoder
_slot_hidden = tf.concat(
[_hidden, _h_intent_i, prev_slot_tensor],
axis=-1)
# apply dropout
_slot_hidden = self.slot_decoder_dropout(
_slot_hidden, training=training)
_slot_h_state, (_slot_h_state, _slot_c_state) = self.slot_decoder_cell(
_slot_hidden, states=[_slot_h_state, _slot_c_state])
_h_slot_i = self.slot_liner_layer(_slot_h_state)
y_slot = y_slot.write(i, _h_slot_i)
# y_slot.append(_h_slot_i)
prev_slot_tensor = self.slot_embedding(
tf.argmax(_h_slot_i, axis=-1))
# Note: reshape cannot be used here; transpose and reshape give different results.
# Wrong: tf.reshape(y_intent.stack(), [x.shape[0], x.shape[1], -1])
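        # e.g. stack() yields (seq_len, batch, dim); transpose([1, 0, 2]) swaps
        # the axes to (batch, seq_len, dim), while reshape would re-slice the
        # buffer and mix time steps from different batch examples.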
y_intent = tf.transpose(y_intent.stack(), [1, 0, 2])
y_slot = tf.transpose(y_slot.stack(), [1, 0, 2])
o_intent = self.get_o_intent(intent_pred=y_intent, mask=x._keras_mask)
output_dict = {"intent_logits": o_intent, "slot_logits": y_slot}
if intent_ids is not None and tags_ids is not None:
_intent_ids = tf.broadcast_to(intent_ids, tags_ids.shape)
active_loss = tags_ids != -100
_intent_loss = self._intent_loss.compute_loss(
y_true=tf.boolean_mask(_intent_ids, active_loss),
y_pred=tf.boolean_mask(y_intent, active_loss))
_slot_loss = self._slot_loss.compute_loss(
y_true=tags_ids, y_pred=y_slot)
output_dict["loss"] = _intent_loss + _slot_loss
self._metrics["nlu_acc"].update_state(
y_true=[intent_ids, tags_ids],
y_pred=[o_intent, y_slot])
return output_dict
@staticmethod
def get_o_intent(intent_pred, mask):
mask = tf.cast(mask, dtype=tf.int32)
o_intent = tf.argmax(intent_pred, axis=-1)
seq_lengths = tf.reduce_sum(mask, axis=-1)
# take the most common token-level intent as the query intent
# https://www.tensorflow.org/api_docs/python/tf/unique_with_counts
def get_max_count_intent(_intent):
_y, _idx, _count = tf.unique_with_counts(_intent)
_intent = _y[tf.argmax(_count)]
return [_intent]
o_intent = tf.convert_to_tensor(
[get_max_count_intent(o_intent[i][: seq_lengths[i]])
for i in range(len(seq_lengths))], dtype=tf.int32)
return o_intent
| avg_line_length: 42.397436 | max_line_length: 133 | alphanum_fraction: 0.642274 |
import tensorflow as tf
from typing import Dict
from cybo.data.vocabulary import Vocabulary
from cybo.modules.attentions import SelfAttentionLayer
from cybo.losses.slu_loss import slu_loss_func
from cybo.metrics.nlu_acc_metric import NluAccMetric, Metric
from cybo.losses.token_classification_loss import TokenClassificationLoss
from cybo.models.model import Model
class StackPropagationSlu(Model):
def __init__(
self, embedding_dim, hidden_dim, dropout_rate,
vocab: Vocabulary, *args, **kwargs):
super().__init__(vocab=vocab, *args, **kwargs)
_vocab_size = self._vocab.get_vocab_size("text")
_intent_size = self._vocab.get_vocab_size("intent")
_slot_size = self._vocab.get_vocab_size("tags")
self.embedding = tf.keras.layers.Embedding(
_vocab_size, embedding_dim, mask_zero=True)
self.bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(hidden_dim, return_sequences=True))
self.attention_layer = SelfAttentionLayer(
hidden_dim=1024, output_dim=128,
dropout_rate=dropout_rate)
self.dropout1 = tf.keras.layers.Dropout(rate=dropout_rate)
self.concat = tf.keras.layers.Concatenate()
self.intent_decoder_cell = tf.keras.layers.LSTMCell(units=64)
self.slot_decoder_cell = tf.keras.layers.LSTMCell(units=64)
self.intent_decoder_dropout = tf.keras.layers.Dropout(
rate=dropout_rate)
self.slot_decoder_dropout = tf.keras.layers.Dropout(rate=dropout_rate)
self.intent_liner_layer = tf.keras.layers.Dense(
units=_intent_size)
self.slot_liner_layer = tf.keras.layers.Dense(
units=_slot_size)
self.intent_embedding = tf.keras.layers.Embedding(_intent_size, 8)
self.slot_embedding = tf.keras.layers.Embedding(_slot_size, 32)
self._intent_loss = TokenClassificationLoss()
self._slot_loss = TokenClassificationLoss()
def init_metrics(self) -> Dict[str, Metric]:
return {"nlu_acc": NluAccMetric()}
def call(self, input_ids, intent_ids=None, tags_ids=None, mask=None,
training=True):
x = self.embedding(input_ids)
x = self.dropout1(x, training=training)
h = self.bi_lstm(x)
c = self.attention_layer(h)
e = self.concat([h, c])
_intent_h_state, _intent_c_state = tf.zeros(
[x.shape[0], 64]), tf.zeros([x.shape[0], 64])
_slot_h_state, _slot_c_state = tf.zeros(
[x.shape[0], 64]), tf.zeros([x.shape[0], 64])
y_intent, y_slot = tf.TensorArray(
dtype=tf.float32, size=0, dynamic_size=True), tf.TensorArray(
dtype=tf.float32, size=0, dynamic_size=True)
prev_intent_tensor = tf.zeros([x.shape[0], 8])
prev_slot_tensor = tf.zeros([x.shape[0], 32])
for i in tf.range(x.shape[1]):
_hidden = e[:, i, :]
_intent_hidden = tf.concat([_hidden, prev_intent_tensor], axis=-1)
_intent_hidden = self.intent_decoder_dropout(
_intent_hidden, training=training)
_intent_h_state, (_intent_h_state, _intent_c_state) = self.intent_decoder_cell(
_intent_hidden, states=[_intent_h_state, _intent_c_state])
_h_intent_i = self.intent_liner_layer(_intent_h_state)
y_intent = y_intent.write(i, _h_intent_i)
prev_intent_tensor = self.intent_embedding(
tf.argmax(_h_intent_i, axis=-1))
_slot_hidden = tf.concat(
[_hidden, _h_intent_i, prev_slot_tensor],
axis=-1)
_slot_hidden = self.slot_decoder_dropout(
_slot_hidden, training=training)
_slot_h_state, (_slot_h_state, _slot_c_state) = self.slot_decoder_cell(
_slot_hidden, states=[_slot_h_state, _slot_c_state])
_h_slot_i = self.slot_liner_layer(_slot_h_state)
y_slot = y_slot.write(i, _h_slot_i)
prev_slot_tensor = self.slot_embedding(
tf.argmax(_h_slot_i, axis=-1))
y_intent = tf.transpose(y_intent.stack(), [1, 0, 2])
y_slot = tf.transpose(y_slot.stack(), [1, 0, 2])
o_intent = self.get_o_intent(intent_pred=y_intent, mask=x._keras_mask)
output_dict = {"intent_logits": o_intent, "slot_logits": y_slot}
if intent_ids is not None and tags_ids is not None:
_intent_ids = tf.broadcast_to(intent_ids, tags_ids.shape)
active_loss = tags_ids != -100
_intent_loss = self._intent_loss.compute_loss(
y_true=tf.boolean_mask(_intent_ids, active_loss),
y_pred=tf.boolean_mask(y_intent, active_loss))
_slot_loss = self._slot_loss.compute_loss(
y_true=tags_ids, y_pred=y_slot)
output_dict["loss"] = _intent_loss + _slot_loss
self._metrics["nlu_acc"].update_state(
y_true=[intent_ids, tags_ids],
y_pred=[o_intent, y_slot])
return output_dict
@staticmethod
def get_o_intent(intent_pred, mask):
mask = tf.cast(mask, dtype=tf.int32)
o_intent = tf.argmax(intent_pred, axis=-1)
seq_lengths = tf.reduce_sum(mask, axis=-1)
def get_max_count_intent(_intent):
_y, _idx, _count = tf.unique_with_counts(_intent)
_intent = _y[tf.argmax(_count)]
return [_intent]
o_intent = tf.convert_to_tensor(
[get_max_count_intent(o_intent[i][: seq_lengths[i]])
for i in range(len(seq_lengths))], dtype=tf.int32)
return o_intent
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
1c4139ff77d48f0fbf1230a6b53a26872d8dacaf | 1,756 | py | Python | var/spack/repos/builtin/packages/gitconddb/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/gitconddb/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | var/spack/repos/builtin/packages/gitconddb/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2021-07-19T20:31:27.000Z | 2021-07-19T21:14:14.000Z |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gitconddb(CMakePackage):
"""Conditions Database library using a Git repository as the
storage backend"""
homepage = "https://gitlab.cern.ch/lhcb/GitCondDB"
url = "https://gitlab.cern.ch/lhcb/GitCondDB/-/archive/0.1.1/GitCondDB-0.1.1.tar.gz"
git = "https://gitlab.cern.ch/lhcb/GitCondDB.git"
maintainers = ['drbenmorgan']
version('master', branch='master')
version('0.1.1', sha256='024a6867722a3a622ed4327ea7d15641dd48e4e8411bdcc21915e406b3c479a2')
# Add the cxxstd variant for forward compatibility, though we require 17
_cxxstd_values = ('17',)
variant('cxxstd',
default='17',
values=_cxxstd_values,
multi=False,
description='Use the specified C++ standard when building.')
depends_on('cmake@3.10:', type='build')
depends_on('pkgconfig', type='build')
depends_on('nlohmann-json@3.2.0:', type='build')
depends_on('googletest@1.8.1:', type='build')
for s in _cxxstd_values:
depends_on('fmt@5.2.0: cxxstd=' + s, when='cxxstd=' + s)
# Maybe also a boost dependency for macOS older than catalina
depends_on('libgit2')
# Known conflicts on C++17 compatibility (aggressive for now)
conflicts('%gcc@:7.9.999', msg="GitCondDB requires GCC 8 or newer for C++17 support")
conflicts('%apple-clang', when="@:0.1.99", msg="No Darwin support for clang in older versions")
conflicts('%clang platform=darwin', when="@:0.1.99", msg="No Darwin support for clang in older versions")
| 39.022222
| 109
| 0.677107
|
from spack import *
class Gitconddb(CMakePackage):
homepage = "https://gitlab.cern.ch/lhcb/GitCondDB"
url = "https://gitlab.cern.ch/lhcb/GitCondDB/-/archive/0.1.1/GitCondDB-0.1.1.tar.gz"
git = "https://gitlab.cern.ch/lhcb/GitCondDB.git"
maintainers = ['drbenmorgan']
version('master', branch='master')
version('0.1.1', sha256='024a6867722a3a622ed4327ea7d15641dd48e4e8411bdcc21915e406b3c479a2')
_cxxstd_values = ('17',)
variant('cxxstd',
default='17',
values=_cxxstd_values,
multi=False,
description='Use the specified C++ standard when building.')
depends_on('cmake@3.10:', type='build')
depends_on('pkgconfig', type='build')
depends_on('nlohmann-json@3.2.0:', type='build')
depends_on('googletest@1.8.1:', type='build')
for s in _cxxstd_values:
depends_on('fmt@5.2.0: cxxstd=' + s, when='cxxstd=' + s)
depends_on('libgit2')
conflicts('%gcc@:7.9.999', msg="GitCondDB requires GCC 8 or newer for C++17 support")
conflicts('%apple-clang', when="@:0.1.99", msg="No Darwin support for clang in older versions")
conflicts('%clang platform=darwin', when="@:0.1.99", msg="No Darwin support for clang in older versions")
| true
| true
|
1c413af4262f84accf1244bd172729c680cf9f69
| 2,565
|
py
|
Python
|
src/leetcode_36_valid_sudoku.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_36_valid_sudoku.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_36_valid_sudoku.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
# @l2g 36 python3
# [36] Valid Sudoku
# Difficulty: Medium
# https://leetcode.com/problems/valid-sudoku
#
# Determine if a 9 x 9 Sudoku board is valid.
# Only the filled cells need to be validated according to the following rules:
#
# Each row must contain the digits 1-9 without repetition.
# Each column must contain the digits 1-9 without repetition.
# Each of the nine 3 x 3 sub-boxes of the grid must contain the digits 1-9 without repetition.
#
# Note:
#
# A Sudoku board (partially filled) could be valid but is not necessarily solvable.
# Only the filled cells need to be validated according to the mentioned rules.
#
#
# Example 1:
#
#
# Input: board =
# [["5","3",".",".","7",".",".",".","."]
# ,["6",".",".","1","9","5",".",".","."]
# ,[".","9","8",".",".",".",".","6","."]
# ,["8",".",".",".","6",".",".",".","3"]
# ,["4",".",".","8",".","3",".",".","1"]
# ,["7",".",".",".","2",".",".",".","6"]
# ,[".","6",".",".",".",".","2","8","."]
# ,[".",".",".","4","1","9",".",".","5"]
# ,[".",".",".",".","8",".",".","7","9"]]
# Output: true
#
# Example 2:
#
# Input: board =
# [["8","3",".",".","7",".",".",".","."]
# ,["6",".",".","1","9","5",".",".","."]
# ,[".","9","8",".",".",".",".","6","."]
# ,["8",".",".",".","6",".",".",".","3"]
# ,["4",".",".","8",".","3",".",".","1"]
# ,["7",".",".",".","2",".",".",".","6"]
# ,[".","6",".",".",".",".","2","8","."]
# ,[".",".",".","4","1","9",".",".","5"]
# ,[".",".",".",".","8",".",".","7","9"]]
# Output: false
# Explanation: Same as Example 1, except with the 5 in the top left corner being modified to 8.
# Since there are two 8's in the top left 3x3 sub-box, it is invalid.
#
#
# Constraints:
#
# board.length == 9
# board[i].length == 9
# board[i][j] is a digit 1-9 or '.'.
#
#
from typing import List
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
def is_valid(arr):
valid_arr = [elem for elem in arr if elem != "."]
return len(set(valid_arr)) == len(valid_arr)
# row
for row in board:
if not is_valid(row):
return False
# col
for col in zip(*board):
if not is_valid(col):
return False
# Box
for i in [0, 3, 6]:
for j in [0, 3, 6]:
box = [board[i + x][j + y] for x in range(3) for y in range(3)]
if not is_valid(box):
return False
return True
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_36.py")])
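For a quick check outside the test suite, the validator can be exercised directly; a small hedged sketch with synthetic boards (not taken from the original tests):

empty = [["."] * 9 for _ in range(9)]
print(Solution().isValidSudoku(empty))  # True: no filled cells, nothing conflicts

bad = [["."] * 9 for _ in range(9)]
bad[0][0] = bad[0][8] = "5"             # duplicate "5" in the first row
print(Solution().isValidSudoku(bad))    # False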
| 27.287234
| 94
| 0.45731
|
#
#
# Constraints:
#
# board.length == 9
# board[i].length == 9
# board[i][j] is a digit 1-9 or '.'.
#
#
from typing import List
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
def is_valid(arr):
valid_arr = [elem for elem in arr if elem != "."]
return len(set(valid_arr)) == len(valid_arr)
# row
for row in board:
if not is_valid(row):
return False
# col
for col in zip(*board):
if not is_valid(col):
return False
# Box
for i in [0, 3, 6]:
for j in [0, 3, 6]:
box = [board[i + x][j + y] for x in range(3) for y in range(3)]
if not is_valid(box):
return False
return True
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_36.py")])
| true
| true
|
1c413bb45d99abef96e76ab9a9f6eb21cb80b695
| 4,522
|
py
|
Python
|
drf_tutorial/settings.py
|
yaowuya/drf_tutorial
|
ce9de1c28852e4c7b969d4458f10203333f562bc
|
[
"MIT"
] | null | null | null |
drf_tutorial/settings.py
|
yaowuya/drf_tutorial
|
ce9de1c28852e4c7b969d4458f10203333f562bc
|
[
"MIT"
] | null | null | null |
drf_tutorial/settings.py
|
yaowuya/drf_tutorial
|
ce9de1c28852e4c7b969d4458f10203333f562bc
|
[
"MIT"
] | null | null | null |
"""
Django settings for drf_tutorial project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l^#vlh@i1n8ok=u*-t6k-^rtc1e!y+wl$=87x7be#7jbcj-2-7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework', # RESTFUL API
    'rest_framework.authtoken',  # DRF's built-in token authentication
'course.apps.CourseConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drf_tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drf_tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": "drf_tutorial", # noqa
"USER": "root",
"PASSWORD": "root",
"HOST": "localhost",
"PORT": "3306",
        # Unit-test DB settings; best left unchanged
"TEST": {"NAME": "test_db", "CHARSET": "utf8", "COLLATION": "utf8_general_ci"},
},
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "staticfiles"),
]
# Global DRF configuration
REST_FRAMEWORK = {
"DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
"DATETIME_FORMAT": "%Y-%m-%d %H:%M:%S",
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
],
"DEFAULT_PARSER_CLASSES": [ # 解析request.data
"rest_framework.parsers.JSONParser",
"rest_framework.parsers.FormParser",
"rest_framework.parsers.MultiPartParser",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticated",
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
]
}
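With TokenAuthentication enabled above, a client authenticates by sending its key in the Authorization header using DRF's "Token" scheme. A hedged client-side sketch; the endpoint path and token value are hypothetical:

import requests

resp = requests.get(
    "http://localhost:8000/courses/",  # hypothetical endpoint of this project
    headers={"Authorization": "Token 0123456789abcdef0123456789abcdef01234567"},
)
print(resp.status_code)  # 200 with a valid token, 401 otherwise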
| 28.987179
| 91
| 0.687307
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'l^#vlh@i1n8ok=u*-t6k-^rtc1e!y+wl$=87x7be#7jbcj-2-7'
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework', # RESTFUL API
    'rest_framework.authtoken',  # DRF's built-in token authentication
'course.apps.CourseConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drf_tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drf_tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": "drf_tutorial", # noqa
"USER": "root",
"PASSWORD": "root",
"HOST": "localhost",
"PORT": "3306",
        # Unit-test DB settings; best left unchanged
"TEST": {"NAME": "test_db", "CHARSET": "utf8", "COLLATION": "utf8_general_ci"},
},
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "staticfiles"),
]
# Global DRF configuration
REST_FRAMEWORK = {
"DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
"DATETIME_FORMAT": "%Y-%m-%d %H:%M:%S",
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
],
"DEFAULT_PARSER_CLASSES": [ # 解析request.data
"rest_framework.parsers.JSONParser",
"rest_framework.parsers.FormParser",
"rest_framework.parsers.MultiPartParser",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticated",
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
]
}
| true
| true
|
1c413bbdf77a3b942aec26fd0f68d3e30879bc9b
| 1,387
|
py
|
Python
|
bases/decorators.py
|
ysabel31/Apprendre-Python
|
0ab3ca7bcd90952ba1f788802fda9f2f94dde2d5
|
[
"MIT"
] | null | null | null |
bases/decorators.py
|
ysabel31/Apprendre-Python
|
0ab3ca7bcd90952ba1f788802fda9f2f94dde2d5
|
[
"MIT"
] | null | null | null |
bases/decorators.py
|
ysabel31/Apprendre-Python
|
0ab3ca7bcd90952ba1f788802fda9f2f94dde2d5
|
[
"MIT"
] | 2
|
2017-11-15T10:39:35.000Z
|
2020-01-14T17:44:43.000Z
|
# Functions called before other functions,
# e.g. @classmethod, @staticmethod, ...
import functools
# The functools module is for higher-order functions :
# functions that act on or return other functions.
# In general, any callable object can be treated as a function for
# the purposes of this module.
def my_decorator(func):
# This is a convenience function for
# invoking update_wrapper() as a function decorator
# when defining a wrapper function
# functools.update_wrapper = Update a wrapper function to look like
# the wrapped function.
@functools.wraps(func)
def func_that_runs_func():
print("In the decorator")
func()
print("After the decorator")
return func_that_runs_func
@my_decorator
def my_function():
print("I'm the function")
# my_function()
def decorator_with_arguments(number):
def my_decorator_2(func):
@functools.wraps(func)
def func_that_runs_func_2(*args, **kwargs):
print("In the decorator")
if number == 56:
print("Not running the function")
else:
#run my_function_too
func(*args, **kwargs)
print("After the decorator")
return func_that_runs_func_2
return my_decorator_2
@decorator_with_arguments(57)
def my_function_too(x,y):
print(x+y)
my_function_too(57, 67)
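To see what @functools.wraps buys in the decorators above, compare a wrapper with and without it; a minimal sketch (names are illustrative):

import functools

def bare(func):
    def wrapper(*args, **kwargs):   # no functools.wraps
        return func(*args, **kwargs)
    return wrapper

def wrapped(func):
    @functools.wraps(func)          # copies __name__, __doc__, etc.
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@bare
def f():
    """f's docstring"""

@wrapped
def g():
    """g's docstring"""

print(f.__name__, f.__doc__)  # wrapper None      -- metadata lost
print(g.__name__, g.__doc__)  # g g's docstring   -- metadata preserved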
| 28.895833
| 72
| 0.666907
|
def my_decorator(func):
@functools.wraps(func)
def func_that_runs_func():
print("In the decorator")
func()
print("After the decorator")
return func_that_runs_func
@my_decorator
def my_function():
print("I'm the function")
# my_function()
def decorator_with_arguments(number):
def my_decorator_2(func):
@functools.wraps(func)
def func_that_runs_func_2(*args, **kwargs):
print("In the decorator")
if number == 56:
print("Not running the function")
else:
#run my_function_too
func(*args, **kwargs)
print("After the decorator")
return func_that_runs_func_2
return my_decorator_2
@decorator_with_arguments(57)
def my_function_too(x,y):
print(x+y)
my_function_too(57, 67)
| true
| true
|
1c413bcdfc7beb7637423c2b7d843368d4d74299
| 1,262
|
py
|
Python
|
test/functional/rpc_deprecated.py
|
bitcoin-black/bitcoinblack
|
ad87d315c635ef439d4664da46e6672153f91b79
|
[
"MIT"
] | null | null | null |
test/functional/rpc_deprecated.py
|
bitcoin-black/bitcoinblack
|
ad87d315c635ef439d4664da46e6672153f91b79
|
[
"MIT"
] | null | null | null |
test/functional/rpc_deprecated.py
|
bitcoin-black/bitcoinblack
|
ad87d315c635ef439d4664da46e6672153f91b79
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]]
def run_test(self):
self.log.info("estimatefee: Shows deprecated message")
assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1)
self.log.info("Using -deprecatedrpc=estimatefee bypasses the error")
self.nodes[1].estimatefee(1)
self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
if __name__ == '__main__':
DeprecatedRpcTest().main()
| 45.071429
| 123
| 0.731379
|
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]]
def run_test(self):
self.log.info("estimatefee: Shows deprecated message")
assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1)
self.log.info("Using -deprecatedrpc=estimatefee bypasses the error")
self.nodes[1].estimatefee(1)
self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
if __name__ == '__main__':
DeprecatedRpcTest().main()
| true
| true
|
1c413c5d5a60062c9f2ecc2464566dd0eac8b1b6
| 1,752
|
py
|
Python
|
ClassifiedProject/backend/cases/migrations/0002_remove_module_describe_testcase.py
|
PatrickHXH/Test-Development
|
f7a24dceb136597f7a59440299eb3184f98f7a92
|
[
"Apache-2.0"
] | null | null | null |
ClassifiedProject/backend/cases/migrations/0002_remove_module_describe_testcase.py
|
PatrickHXH/Test-Development
|
f7a24dceb136597f7a59440299eb3184f98f7a92
|
[
"Apache-2.0"
] | null | null | null |
ClassifiedProject/backend/cases/migrations/0002_remove_module_describe_testcase.py
|
PatrickHXH/Test-Development
|
f7a24dceb136597f7a59440299eb3184f98f7a92
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-12 12:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cases', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='module',
name='describe',
),
migrations.CreateModel(
name='TestCase',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('url', models.TextField(verbose_name='URL')),
('method', models.CharField(max_length=10, verbose_name='请求方法')),
('header', models.TextField(default='{}', null=True, verbose_name='请求头')),
('params_type', models.CharField(max_length=10, verbose_name='参数类型')),
('params_body', models.TextField(default='{}', null=True, verbose_name='参数内容')),
('response', models.TextField(default='{}', null=True, verbose_name='响应')),
('assert_type', models.CharField(max_length=10, null=True, verbose_name='断言类型')),
('assert_text', models.TextField(default='{}', null=True, verbose_name='断言结果')),
('is_delete', models.BooleanField(default=False, verbose_name='状态')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cases.module')),
],
),
]
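The CreateModel operation above corresponds roughly to the following models.py declaration; this is a hedged reconstruction from the migration, not the project's actual source:

from django.db import models

class TestCase(models.Model):
    # The id BigAutoField primary key is created implicitly
    name = models.CharField(max_length=50, verbose_name='名称')
    url = models.TextField(verbose_name='URL')
    method = models.CharField(max_length=10, verbose_name='请求方法')
    header = models.TextField(default='{}', null=True, verbose_name='请求头')
    params_type = models.CharField(max_length=10, verbose_name='参数类型')
    params_body = models.TextField(default='{}', null=True, verbose_name='参数内容')
    response = models.TextField(default='{}', null=True, verbose_name='响应')
    assert_type = models.CharField(max_length=10, null=True, verbose_name='断言类型')
    assert_text = models.TextField(default='{}', null=True, verbose_name='断言结果')
    is_delete = models.BooleanField(default=False, verbose_name='状态')
    create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    module = models.ForeignKey('cases.Module', on_delete=models.CASCADE)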
| 46.105263
| 117
| 0.594178
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cases', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='module',
name='describe',
),
migrations.CreateModel(
name='TestCase',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('url', models.TextField(verbose_name='URL')),
('method', models.CharField(max_length=10, verbose_name='请求方法')),
('header', models.TextField(default='{}', null=True, verbose_name='请求头')),
('params_type', models.CharField(max_length=10, verbose_name='参数类型')),
('params_body', models.TextField(default='{}', null=True, verbose_name='参数内容')),
('response', models.TextField(default='{}', null=True, verbose_name='响应')),
('assert_type', models.CharField(max_length=10, null=True, verbose_name='断言类型')),
('assert_text', models.TextField(default='{}', null=True, verbose_name='断言结果')),
('is_delete', models.BooleanField(default=False, verbose_name='状态')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cases.module')),
],
),
]
| true
| true
|
1c413c5f52342ec88b2d09dfbb1f0155dcbf36ed
| 6,421
|
py
|
Python
|
opal_common/authentication/signer.py
|
NateKat/opal
|
6906efc793eb02da894e186484e4f4f0b18f985c
|
[
"Apache-2.0"
] | null | null | null |
opal_common/authentication/signer.py
|
NateKat/opal
|
6906efc793eb02da894e186484e4f4f0b18f985c
|
[
"Apache-2.0"
] | null | null | null |
opal_common/authentication/signer.py
|
NateKat/opal
|
6906efc793eb02da894e186484e4f4f0b18f985c
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
from typing import Optional
from uuid import UUID
import jwt
from fastapi import HTTPException, status
from jwt.algorithms import Algorithm, get_default_algorithms
from jwt.api_jwk import PyJWK
from opal_common.authentication.types import JWTAlgorithm, JWTClaims, PrivateKey, PublicKey
from opal_common.logger import logger
class Unauthorized(HTTPException):
"""
HTTP 401 Unauthorized exception.
"""
def __init__(self, description="Bearer token is not valid!", **kwargs):
super().__init__(
status_code=status.HTTP_401_UNAUTHORIZED,
detail={
"error": description,
**kwargs
},
headers={"WWW-Authenticate": "Bearer"}
)
class InvalidJWTCryptoKeysException(Exception):
"""
raised when JWT signer provided with invalid crypto keys
"""
pass
class JWTSigner:
"""
given cryptographic keys, signs and verifies jwt tokens.
"""
def __init__(
self,
private_key: Optional[PrivateKey],
public_key: Optional[PublicKey],
algorithm: JWTAlgorithm,
audience: str,
issuer: str,
):
"""
        inits the signer if and only if the keys provided to __init__
        were generated together and are valid; otherwise it will throw.
        JWT signer can be initialized with empty keys (None),
        in which case signer.enabled == False.
        This allows opal to run both in secure mode (with keys, requiring jwt authentication)
        and in insecure mode (good for development and running locally).
Args:
private_key (PrivateKey): a valid private key or None
public_key (PublicKey): a valid public key or None
algorithm (JWTAlgorithm): the jwt algorithm to use
(possible values: https://pyjwt.readthedocs.io/en/stable/algorithms.html)
audience (string): the value for the aud claim: https://tools.ietf.org/html/rfc7519#section-4.1.3
issuer (string): the value for the iss claim: https://tools.ietf.org/html/rfc7519#section-4.1.1
"""
self._private_key = private_key
self._public_key = public_key
self._algorithm: str = algorithm.value
self._audience = audience
self._issuer = issuer
self._enabled = True
self._verify_crypto_keys()
def _verify_crypto_keys(self):
"""
verifies whether or not valid crypto keys were provided to the signer.
if both keys are valid, encodes and decodes a JWT to make sure the keys match.
if both private and public keys are valid and are matching => signer is enabled
if both private and public keys are None => signer is disabled (self.enabled == False)
if only one key is valid/not-None => throws ValueError
any other case => throws ValueError
"""
if self._private_key is not None and self._public_key is not None:
# both keys provided, let's make sure these keys were generated correctly
token = jwt.encode({"some": "payload"}, self._private_key, algorithm=self._algorithm)
try:
jwt.decode(token, self._public_key, algorithms=[self._algorithm])
except jwt.PyJWTError as exc:
logger.info("JWT Signer key verification failed with error: {err}", err=repr(exc))
raise InvalidJWTCryptoKeysException("private key and public key do not match!") from exc
# save jwk
self._jwk: PyJWK = PyJWK.from_json(self.get_jwk(), algorithm=self._algorithm)
elif (self._private_key != self._public_key) and (self._private_key is None or self._public_key is None):
raise ValueError("JWT Signer not valid, only one of private key / public key pair was provided!")
elif self._private_key is None and self._public_key is None:
# valid situation, running in dev mode and api security is off
self._enabled = False
logger.info("OPAL was not provided with JWT encryption keys, cannot verify api requests!")
else:
raise ValueError("Invalid JWT Signer input!")
def get_jwk(self) -> str:
"""
returns the jwk json contents
"""
algorithm: Optional[Algorithm] = get_default_algorithms().get(self._algorithm)
if algorithm is None:
raise ValueError(f"invalid jwt algorithm: {self._algorithm}")
return algorithm.to_jwk(self._public_key)
@property
def enabled(self):
"""
whether or not the signer has valid cryptographic keys
"""
return self._enabled
def verify(self, token: str) -> JWTClaims:
"""
verifies a JWT token is valid.
if valid returns dict with jwt claims, otherwise throws.
"""
try:
return jwt.decode(
token,
self._public_key,
algorithms=[self._algorithm],
audience=self._audience,
issuer=self._issuer
)
except jwt.ExpiredSignatureError:
raise Unauthorized(token=token, description="Access token is expired")
except jwt.InvalidAudienceError:
raise Unauthorized(token=token, description="Invalid access token: invalid audience claim")
except jwt.InvalidIssuerError:
raise Unauthorized(token=token, description="Invalid access token: invalid issuer claim")
except jwt.DecodeError:
raise Unauthorized(token=token, description="Could not decode access token")
except Exception:
raise Unauthorized(token=token, description="Unknown JWT error")
def sign(self, sub: UUID, token_lifetime: timedelta, custom_claims: dict = {}) -> str:
issued_at = datetime.utcnow()
expire_at = issued_at + token_lifetime
payload = {
"iat": issued_at,
"exp": expire_at,
"aud": self._audience,
"iss": self._issuer,
"sub": sub.hex,
}
if custom_claims:
payload.update(custom_claims)
headers = {}
if self._jwk.key_id is not None:
headers = {"kid": self._jwk.key_id}
return jwt.encode(payload, self._private_key, algorithm=self._algorithm, headers=headers)
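A hedged round-trip sketch of the signer; generating keys with the cryptography package and the JWTAlgorithm.RS256 member are assumptions on top of the code above, which only receives already-loaded key objects:

from datetime import timedelta
from uuid import uuid4

from cryptography.hazmat.primitives.asymmetric import rsa  # assumed available

from opal_common.authentication.types import JWTAlgorithm

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

signer = JWTSigner(
    private_key=private_key,
    public_key=public_key,
    algorithm=JWTAlgorithm.RS256,  # assumption: RS256 member exists on the enum
    audience="https://api.example.com",
    issuer="https://example.com",
)
token = signer.sign(sub=uuid4(), token_lifetime=timedelta(hours=1))
print(signer.verify(token)["sub"])  # hex of the uuid passed to sign()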
| 40.13125
| 113
| 0.635104
|
from datetime import datetime, timedelta
from typing import Optional
from uuid import UUID
import jwt
from fastapi import HTTPException, status
from jwt.algorithms import Algorithm, get_default_algorithms
from jwt.api_jwk import PyJWK
from opal_common.authentication.types import JWTAlgorithm, JWTClaims, PrivateKey, PublicKey
from opal_common.logger import logger
class Unauthorized(HTTPException):
def __init__(self, description="Bearer token is not valid!", **kwargs):
super().__init__(
status_code=status.HTTP_401_UNAUTHORIZED,
detail={
"error": description,
**kwargs
},
headers={"WWW-Authenticate": "Bearer"}
)
class InvalidJWTCryptoKeysException(Exception):
pass
class JWTSigner:
def __init__(
self,
private_key: Optional[PrivateKey],
public_key: Optional[PublicKey],
algorithm: JWTAlgorithm,
audience: str,
issuer: str,
):
self._private_key = private_key
self._public_key = public_key
self._algorithm: str = algorithm.value
self._audience = audience
self._issuer = issuer
self._enabled = True
self._verify_crypto_keys()
def _verify_crypto_keys(self):
if self._private_key is not None and self._public_key is not None:
token = jwt.encode({"some": "payload"}, self._private_key, algorithm=self._algorithm)
try:
jwt.decode(token, self._public_key, algorithms=[self._algorithm])
except jwt.PyJWTError as exc:
logger.info("JWT Signer key verification failed with error: {err}", err=repr(exc))
raise InvalidJWTCryptoKeysException("private key and public key do not match!") from exc
# save jwk
self._jwk: PyJWK = PyJWK.from_json(self.get_jwk(), algorithm=self._algorithm)
elif (self._private_key != self._public_key) and (self._private_key is None or self._public_key is None):
raise ValueError("JWT Signer not valid, only one of private key / public key pair was provided!")
elif self._private_key is None and self._public_key is None:
# valid situation, running in dev mode and api security is off
self._enabled = False
logger.info("OPAL was not provided with JWT encryption keys, cannot verify api requests!")
else:
raise ValueError("Invalid JWT Signer input!")
def get_jwk(self) -> str:
algorithm: Optional[Algorithm] = get_default_algorithms().get(self._algorithm)
if algorithm is None:
raise ValueError(f"invalid jwt algorithm: {self._algorithm}")
return algorithm.to_jwk(self._public_key)
@property
def enabled(self):
return self._enabled
def verify(self, token: str) -> JWTClaims:
try:
return jwt.decode(
token,
self._public_key,
algorithms=[self._algorithm],
audience=self._audience,
issuer=self._issuer
)
except jwt.ExpiredSignatureError:
raise Unauthorized(token=token, description="Access token is expired")
except jwt.InvalidAudienceError:
raise Unauthorized(token=token, description="Invalid access token: invalid audience claim")
except jwt.InvalidIssuerError:
raise Unauthorized(token=token, description="Invalid access token: invalid issuer claim")
except jwt.DecodeError:
raise Unauthorized(token=token, description="Could not decode access token")
except Exception:
raise Unauthorized(token=token, description="Unknown JWT error")
def sign(self, sub: UUID, token_lifetime: timedelta, custom_claims: dict = {}) -> str:
issued_at = datetime.utcnow()
expire_at = issued_at + token_lifetime
payload = {
"iat": issued_at,
"exp": expire_at,
"aud": self._audience,
"iss": self._issuer,
"sub": sub.hex,
}
if custom_claims:
payload.update(custom_claims)
headers = {}
if self._jwk.key_id is not None:
headers = {"kid": self._jwk.key_id}
return jwt.encode(payload, self._private_key, algorithm=self._algorithm, headers=headers)
| true
| true
|
1c413c8e4a6d4550812da27880adbdcf07dbf6a4
| 12,271
|
py
|
Python
|
image/pixelsnail.py
|
arash-safari/vp
|
377e0172112157b79690b32349481a17e7590063
|
[
"MIT"
] | null | null | null |
image/pixelsnail.py
|
arash-safari/vp
|
377e0172112157b79690b32349481a17e7590063
|
[
"MIT"
] | null | null | null |
image/pixelsnail.py
|
arash-safari/vp
|
377e0172112157b79690b32349481a17e7590063
|
[
"MIT"
] | null | null | null |
# Copyright (c) Xi Chen
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Borrowed from https://github.com/neocxi/pixelsnail-public and ported to PyTorch
from math import sqrt
from functools import partial, lru_cache
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def wn_linear(in_dim, out_dim):
return nn.utils.weight_norm(nn.Linear(in_dim, out_dim))
class WNConv2d(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
stride=1,
padding=0,
bias=True,
activation=None,
):
super().__init__()
self.conv = nn.utils.weight_norm(
nn.Conv2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
bias=bias,
)
)
self.out_channel = out_channel
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
self.kernel_size = kernel_size
self.activation = activation
def forward(self, input):
out = self.conv(input)
if self.activation is not None:
out = self.activation(out)
return out
def shift_down(input, size=1):
return F.pad(input, [0, 0, size, 0])[:, :, : input.shape[2], :]
def shift_right(input, size=1):
return F.pad(input, [size, 0, 0, 0])[:, :, :, : input.shape[3]]
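# Added note: shift_down pads one row of zeros on top and drops the bottom row
# (output row r is input row r-1); shift_right does the same along the width
# axis. These shifts keep the vertical/horizontal stacks strictly causal.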
class CausalConv2d(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
stride=1,
padding='downright',
activation=None,
):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = [kernel_size] * 2
self.kernel_size = kernel_size
if padding == 'downright':
pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0]
elif padding == 'down' or padding == 'causal':
pad = kernel_size[1] // 2
pad = [pad, pad, kernel_size[0] - 1, 0]
self.causal = 0
if padding == 'causal':
self.causal = kernel_size[1] // 2
self.pad = nn.ZeroPad2d(pad)
self.conv = WNConv2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=0,
activation=activation,
)
def forward(self, input):
out = self.pad(input)
if self.causal > 0:
self.conv.conv.weight_v.data[:, :, -1, self.causal :].zero_()
out = self.conv(out)
return out
class GatedResBlock(nn.Module):
def __init__(
self,
in_channel,
channel,
kernel_size,
conv='wnconv2d',
activation=nn.ELU,
dropout=0.1,
auxiliary_channel=0,
condition_dim=0,
):
super().__init__()
if conv == 'wnconv2d':
conv_module = partial(WNConv2d, padding=kernel_size // 2)
elif conv == 'causal_downright':
conv_module = partial(CausalConv2d, padding='downright')
elif conv == 'causal':
conv_module = partial(CausalConv2d, padding='causal')
self.activation = activation(inplace=True)
self.conv1 = conv_module(in_channel, channel, kernel_size)
if auxiliary_channel > 0:
self.aux_conv = WNConv2d(auxiliary_channel, channel, 1)
self.dropout = nn.Dropout(dropout)
self.conv2 = conv_module(channel, in_channel * 2, kernel_size)
if condition_dim > 0:
# self.condition = nn.Linear(condition_dim, in_channel * 2, bias=False)
self.condition = WNConv2d(condition_dim, in_channel * 2, 1, bias=False)
self.gate = nn.GLU(1)
def forward(self, input, aux_input=None, condition=None):
out = self.conv1(self.activation(input))
# print('gateblock condition:{}'.format(condition))
if aux_input is not None:
out = out + self.aux_conv(self.activation(aux_input))
out = self.activation(out)
out = self.dropout(out)
out = self.conv2(out)
if condition is not None:
condition = self.condition(condition)
# print('condition: {}, out: {}'.format(condition.shape,out.shape))
out += condition
# out = out + condition.view(condition.shape[0], 1, 1, condition.shape[1])
out = self.gate(out)
out += input
return out
@lru_cache(maxsize=64)
def causal_mask(size):
shape = [size, size]
mask = np.triu(np.ones(shape), k=1).astype(np.uint8).T
start_mask = np.ones(size).astype(np.float32)
start_mask[0] = 0
return (
torch.from_numpy(mask).unsqueeze(0),
torch.from_numpy(start_mask).unsqueeze(1),
)
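# Added worked example: for size=3 the mask is strictly lower-triangular, so
# position i may attend only to positions j < i (never to itself), while
# start_mask zeroes the attention output of position 0, which has no
# predecessors:
#   mask[0]    = [[0, 0, 0],
#                 [1, 0, 0],
#                 [1, 1, 0]]
#   start_mask = [[0.], [1.], [1.]]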
class CausalAttention(nn.Module):
def __init__(self, query_channel, key_channel, channel, n_head=8, dropout=0.1):
super().__init__()
self.query = wn_linear(query_channel, channel)
self.key = wn_linear(key_channel, channel)
self.value = wn_linear(key_channel, channel)
self.dim_head = channel // n_head
self.n_head = n_head
self.dropout = nn.Dropout(dropout)
def forward(self, query, key):
batch, _, height, width = key.shape
def reshape(input):
return input.view(batch, -1, self.n_head, self.dim_head).transpose(1, 2)
query_flat = query.view(batch, query.shape[1], -1).transpose(1, 2)
key_flat = key.view(batch, key.shape[1], -1).transpose(1, 2)
query = reshape(self.query(query_flat))
key = reshape(self.key(key_flat)).transpose(2, 3)
value = reshape(self.value(key_flat))
attn = torch.matmul(query, key) / sqrt(self.dim_head)
mask, start_mask = causal_mask(height * width)
mask = mask.type_as(query)
start_mask = start_mask.type_as(query)
attn = attn.masked_fill(mask == 0, -1e4)
attn = torch.softmax(attn, 3) * start_mask
attn = self.dropout(attn)
out = attn @ value
out = out.transpose(1, 2).reshape(
batch, height, width, self.dim_head * self.n_head
)
out = out.permute(0, 3, 1, 2)
return out
class PixelBlock(nn.Module):
def __init__(
self,
in_channel,
channel,
kernel_size,
n_res_block,
attention=True,
dropout=0.1,
condition_dim=0,
):
super().__init__()
resblocks = []
for i in range(n_res_block):
resblocks.append(
GatedResBlock(
in_channel,
channel,
kernel_size,
conv='causal',
dropout=dropout,
condition_dim=condition_dim,
)
)
self.resblocks = nn.ModuleList(resblocks)
self.attention = attention
if attention:
self.key_resblock = GatedResBlock(
in_channel * 2 + 2, in_channel, 1, dropout=dropout
)
self.query_resblock = GatedResBlock(
in_channel + 2, in_channel, 1, dropout=dropout
)
self.causal_attention = CausalAttention(
in_channel + 2, in_channel * 2 + 2, in_channel // 2, dropout=dropout
)
self.out_resblock = GatedResBlock(
in_channel,
in_channel,
1,
auxiliary_channel=in_channel // 2,
dropout=dropout,
)
else:
self.out = WNConv2d(in_channel + 2, in_channel, 1)
def forward(self, input, background, condition=None):
out = input
for resblock in self.resblocks:
# print('resblock condition:{}'.shape(condition.shape))
out = resblock(out, condition=condition)
if self.attention:
key_cat = torch.cat([input, out, background], 1)
key = self.key_resblock(key_cat)
query_cat = torch.cat([out, background], 1)
query = self.query_resblock(query_cat)
attn_out = self.causal_attention(query, key)
out = self.out_resblock(out, attn_out)
else:
bg_cat = torch.cat([out, background], 1)
out = self.out(bg_cat)
return out
class CondResNet(nn.Module):
def __init__(self, in_channel, channel, kernel_size, n_res_block):
super().__init__()
blocks = [WNConv2d(in_channel, channel, kernel_size, padding=kernel_size // 2)]
for i in range(n_res_block):
blocks.append(GatedResBlock(channel, channel, kernel_size))
self.blocks = nn.Sequential(*blocks)
def forward(self, input):
return self.blocks(input)
class PixelSNAIL(nn.Module):
def __init__(
self,
shape,
n_class,
channel,
kernel_size,
n_block,
n_res_block,
res_channel,
attention=True,
dropout=0.1,
n_cond_res_block=0,
cond_res_channel=0,
cond_res_kernel=3,
n_out_res_block=0,
):
super().__init__()
channels, height, width = shape
self.n_class = n_class
if kernel_size % 2 == 0:
kernel = kernel_size + 1
else:
kernel = kernel_size
self.horizontal = CausalConv2d(
n_class, channel, [kernel // 2, kernel], padding='down'
)
self.vertical = CausalConv2d(
n_class, channel, [(kernel + 1) // 2, kernel // 2], padding='downright'
)
coord_x = (torch.arange(height).float() - height / 2) / height
coord_x = coord_x.view(1, 1, height, 1).expand(1, 1, height, width)
coord_y = (torch.arange(width).float() - width / 2) / width
coord_y = coord_y.view(1, 1, 1, width).expand(1, 1, height, width)
self.register_buffer('background', torch.cat([coord_x, coord_y], 1))
self.blocks = nn.ModuleList()
for i in range(n_block):
self.blocks.append(
PixelBlock(
channel,
res_channel,
kernel_size,
n_res_block,
attention=attention,
dropout=dropout,
condition_dim=cond_res_channel,
)
)
if n_cond_res_block > 0:
self.cond_resnet = CondResNet(
n_class, cond_res_channel, cond_res_kernel, n_cond_res_block
)
out = []
for i in range(n_out_res_block):
out.append(GatedResBlock(channel, res_channel, 1))
out.extend([nn.ELU(inplace=True), WNConv2d(channel, n_class, 1)])
self.out = nn.Sequential(*out)
def forward(self, input, condition=None, cache=None):
if cache is None:
cache = {}
batch, height, width = input.shape
input = (
F.one_hot(input, self.n_class).permute(0, 3, 1, 2).type_as(self.background)
)
horizontal = shift_down(self.horizontal(input))
vertical = shift_right(self.vertical(input))
out = horizontal + vertical
background = self.background[:, :, :height, :].expand(batch, 2, height, width)
if condition is not None:
if 'condition' in cache:
condition = cache['condition']
condition = condition[:, :, :height, :]
else:
condition = (
F.one_hot(condition, self.n_class)
.permute(0, 3, 1, 2)
.type_as(self.background)
)
condition = self.cond_resnet(condition)
condition = F.interpolate(condition, scale_factor=2)
cache['condition'] = condition.detach().clone()
condition = condition[:, :, :height, :]
for block in self.blocks:
out = block(out, background, condition=condition)
out = self.out(out)
return out, cache
| 28.274194
| 87
| 0.556271
|
from math import sqrt
from functools import partial, lru_cache
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
def wn_linear(in_dim, out_dim):
return nn.utils.weight_norm(nn.Linear(in_dim, out_dim))
class WNConv2d(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
stride=1,
padding=0,
bias=True,
activation=None,
):
super().__init__()
self.conv = nn.utils.weight_norm(
nn.Conv2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
bias=bias,
)
)
self.out_channel = out_channel
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
self.kernel_size = kernel_size
self.activation = activation
def forward(self, input):
out = self.conv(input)
if self.activation is not None:
out = self.activation(out)
return out
def shift_down(input, size=1):
return F.pad(input, [0, 0, size, 0])[:, :, : input.shape[2], :]
def shift_right(input, size=1):
return F.pad(input, [size, 0, 0, 0])[:, :, :, : input.shape[3]]
class CausalConv2d(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
stride=1,
padding='downright',
activation=None,
):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = [kernel_size] * 2
self.kernel_size = kernel_size
if padding == 'downright':
pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0]
elif padding == 'down' or padding == 'causal':
pad = kernel_size[1] // 2
pad = [pad, pad, kernel_size[0] - 1, 0]
self.causal = 0
if padding == 'causal':
self.causal = kernel_size[1] // 2
self.pad = nn.ZeroPad2d(pad)
self.conv = WNConv2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=0,
activation=activation,
)
def forward(self, input):
out = self.pad(input)
if self.causal > 0:
self.conv.conv.weight_v.data[:, :, -1, self.causal :].zero_()
out = self.conv(out)
return out
class GatedResBlock(nn.Module):
def __init__(
self,
in_channel,
channel,
kernel_size,
conv='wnconv2d',
activation=nn.ELU,
dropout=0.1,
auxiliary_channel=0,
condition_dim=0,
):
super().__init__()
if conv == 'wnconv2d':
conv_module = partial(WNConv2d, padding=kernel_size // 2)
elif conv == 'causal_downright':
conv_module = partial(CausalConv2d, padding='downright')
elif conv == 'causal':
conv_module = partial(CausalConv2d, padding='causal')
self.activation = activation(inplace=True)
self.conv1 = conv_module(in_channel, channel, kernel_size)
if auxiliary_channel > 0:
self.aux_conv = WNConv2d(auxiliary_channel, channel, 1)
self.dropout = nn.Dropout(dropout)
self.conv2 = conv_module(channel, in_channel * 2, kernel_size)
if condition_dim > 0:
self.condition = WNConv2d(condition_dim, in_channel * 2, 1, bias=False)
self.gate = nn.GLU(1)
def forward(self, input, aux_input=None, condition=None):
out = self.conv1(self.activation(input))
if aux_input is not None:
out = out + self.aux_conv(self.activation(aux_input))
out = self.activation(out)
out = self.dropout(out)
out = self.conv2(out)
if condition is not None:
condition = self.condition(condition)
out += condition
out = self.gate(out)
out += input
return out
@lru_cache(maxsize=64)
def causal_mask(size):
shape = [size, size]
mask = np.triu(np.ones(shape), k=1).astype(np.uint8).T
start_mask = np.ones(size).astype(np.float32)
start_mask[0] = 0
return (
torch.from_numpy(mask).unsqueeze(0),
torch.from_numpy(start_mask).unsqueeze(1),
)
class CausalAttention(nn.Module):
def __init__(self, query_channel, key_channel, channel, n_head=8, dropout=0.1):
super().__init__()
self.query = wn_linear(query_channel, channel)
self.key = wn_linear(key_channel, channel)
self.value = wn_linear(key_channel, channel)
self.dim_head = channel // n_head
self.n_head = n_head
self.dropout = nn.Dropout(dropout)
def forward(self, query, key):
batch, _, height, width = key.shape
def reshape(input):
return input.view(batch, -1, self.n_head, self.dim_head).transpose(1, 2)
query_flat = query.view(batch, query.shape[1], -1).transpose(1, 2)
key_flat = key.view(batch, key.shape[1], -1).transpose(1, 2)
query = reshape(self.query(query_flat))
key = reshape(self.key(key_flat)).transpose(2, 3)
value = reshape(self.value(key_flat))
attn = torch.matmul(query, key) / sqrt(self.dim_head)
mask, start_mask = causal_mask(height * width)
mask = mask.type_as(query)
start_mask = start_mask.type_as(query)
attn = attn.masked_fill(mask == 0, -1e4)
attn = torch.softmax(attn, 3) * start_mask
attn = self.dropout(attn)
out = attn @ value
out = out.transpose(1, 2).reshape(
batch, height, width, self.dim_head * self.n_head
)
out = out.permute(0, 3, 1, 2)
return out
class PixelBlock(nn.Module):
def __init__(
self,
in_channel,
channel,
kernel_size,
n_res_block,
attention=True,
dropout=0.1,
condition_dim=0,
):
super().__init__()
resblocks = []
for i in range(n_res_block):
resblocks.append(
GatedResBlock(
in_channel,
channel,
kernel_size,
conv='causal',
dropout=dropout,
condition_dim=condition_dim,
)
)
self.resblocks = nn.ModuleList(resblocks)
self.attention = attention
if attention:
self.key_resblock = GatedResBlock(
in_channel * 2 + 2, in_channel, 1, dropout=dropout
)
self.query_resblock = GatedResBlock(
in_channel + 2, in_channel, 1, dropout=dropout
)
self.causal_attention = CausalAttention(
in_channel + 2, in_channel * 2 + 2, in_channel // 2, dropout=dropout
)
self.out_resblock = GatedResBlock(
in_channel,
in_channel,
1,
auxiliary_channel=in_channel // 2,
dropout=dropout,
)
else:
self.out = WNConv2d(in_channel + 2, in_channel, 1)
def forward(self, input, background, condition=None):
out = input
for resblock in self.resblocks:
out = resblock(out, condition=condition)
if self.attention:
key_cat = torch.cat([input, out, background], 1)
key = self.key_resblock(key_cat)
query_cat = torch.cat([out, background], 1)
query = self.query_resblock(query_cat)
attn_out = self.causal_attention(query, key)
out = self.out_resblock(out, attn_out)
else:
bg_cat = torch.cat([out, background], 1)
out = self.out(bg_cat)
return out
class CondResNet(nn.Module):
def __init__(self, in_channel, channel, kernel_size, n_res_block):
super().__init__()
blocks = [WNConv2d(in_channel, channel, kernel_size, padding=kernel_size // 2)]
for i in range(n_res_block):
blocks.append(GatedResBlock(channel, channel, kernel_size))
self.blocks = nn.Sequential(*blocks)
def forward(self, input):
return self.blocks(input)
class PixelSNAIL(nn.Module):
def __init__(
self,
shape,
n_class,
channel,
kernel_size,
n_block,
n_res_block,
res_channel,
attention=True,
dropout=0.1,
n_cond_res_block=0,
cond_res_channel=0,
cond_res_kernel=3,
n_out_res_block=0,
):
super().__init__()
channels, height, width = shape
self.n_class = n_class
if kernel_size % 2 == 0:
kernel = kernel_size + 1
else:
kernel = kernel_size
self.horizontal = CausalConv2d(
n_class, channel, [kernel // 2, kernel], padding='down'
)
self.vertical = CausalConv2d(
n_class, channel, [(kernel + 1) // 2, kernel // 2], padding='downright'
)
coord_x = (torch.arange(height).float() - height / 2) / height
coord_x = coord_x.view(1, 1, height, 1).expand(1, 1, height, width)
coord_y = (torch.arange(width).float() - width / 2) / width
coord_y = coord_y.view(1, 1, 1, width).expand(1, 1, height, width)
self.register_buffer('background', torch.cat([coord_x, coord_y], 1))
self.blocks = nn.ModuleList()
for i in range(n_block):
self.blocks.append(
PixelBlock(
channel,
res_channel,
kernel_size,
n_res_block,
attention=attention,
dropout=dropout,
condition_dim=cond_res_channel,
)
)
if n_cond_res_block > 0:
self.cond_resnet = CondResNet(
n_class, cond_res_channel, cond_res_kernel, n_cond_res_block
)
out = []
for i in range(n_out_res_block):
out.append(GatedResBlock(channel, res_channel, 1))
out.extend([nn.ELU(inplace=True), WNConv2d(channel, n_class, 1)])
self.out = nn.Sequential(*out)
def forward(self, input, condition=None, cache=None):
if cache is None:
cache = {}
batch, height, width = input.shape
input = (
F.one_hot(input, self.n_class).permute(0, 3, 1, 2).type_as(self.background)
)
horizontal = shift_down(self.horizontal(input))
vertical = shift_right(self.vertical(input))
out = horizontal + vertical
background = self.background[:, :, :height, :].expand(batch, 2, height, width)
if condition is not None:
if 'condition' in cache:
condition = cache['condition']
condition = condition[:, :, :height, :]
else:
condition = (
F.one_hot(condition, self.n_class)
.permute(0, 3, 1, 2)
.type_as(self.background)
)
condition = self.cond_resnet(condition)
condition = F.interpolate(condition, scale_factor=2)
cache['condition'] = condition.detach().clone()
condition = condition[:, :, :height, :]
for block in self.blocks:
out = block(out, background, condition=condition)
out = self.out(out)
return out, cache
| true
| true
|
1c413da1b8018361ba7e7266e253374b53988013
| 30,674
|
py
|
Python
|
sdk/python/pulumi_azure_native/devices/v20160203/_inputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_native/devices/v20160203/_inputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_native/devices/v20160203/_inputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'CloudToDevicePropertiesArgs',
'EventHubPropertiesArgs',
'FeedbackPropertiesArgs',
'IotHubPropertiesArgs',
'IotHubSkuInfoArgs',
'IpFilterRuleArgs',
'MessagingEndpointPropertiesArgs',
'OperationsMonitoringPropertiesArgs',
'SharedAccessSignatureAuthorizationRuleArgs',
'StorageEndpointPropertiesArgs',
]
@pulumi.input_type
class CloudToDevicePropertiesArgs:
def __init__(__self__, *,
default_ttl_as_iso8601: Optional[pulumi.Input[str]] = None,
feedback: Optional[pulumi.Input['FeedbackPropertiesArgs']] = None,
max_delivery_count: Optional[pulumi.Input[int]] = None):
"""
The IoT hub cloud-to-device messaging properties.
:param pulumi.Input[str] default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:param pulumi.Input['FeedbackPropertiesArgs'] feedback: The properties of the feedback queue for cloud-to-device messages.
:param pulumi.Input[int] max_delivery_count: The max delivery count for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
if default_ttl_as_iso8601 is not None:
pulumi.set(__self__, "default_ttl_as_iso8601", default_ttl_as_iso8601)
if feedback is not None:
pulumi.set(__self__, "feedback", feedback)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
@property
@pulumi.getter(name="defaultTtlAsIso8601")
def default_ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The default time to live for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "default_ttl_as_iso8601")
@default_ttl_as_iso8601.setter
def default_ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_ttl_as_iso8601", value)
@property
@pulumi.getter
def feedback(self) -> Optional[pulumi.Input['FeedbackPropertiesArgs']]:
"""
The properties of the feedback queue for cloud-to-device messages.
"""
return pulumi.get(self, "feedback")
@feedback.setter
def feedback(self, value: Optional[pulumi.Input['FeedbackPropertiesArgs']]):
pulumi.set(self, "feedback", value)
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[pulumi.Input[int]]:
"""
The max delivery count for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "max_delivery_count")
@max_delivery_count.setter
def max_delivery_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_delivery_count", value)
@pulumi.input_type
class EventHubPropertiesArgs:
def __init__(__self__, *,
partition_count: Optional[pulumi.Input[int]] = None,
retention_time_in_days: Optional[pulumi.Input[float]] = None):
"""
The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
:param pulumi.Input[int] partition_count: The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:param pulumi.Input[float] retention_time_in_days: The retention time for device-to-cloud messages in days. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages
"""
if partition_count is not None:
pulumi.set(__self__, "partition_count", partition_count)
if retention_time_in_days is not None:
pulumi.set(__self__, "retention_time_in_days", retention_time_in_days)
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
"""
return pulumi.get(self, "partition_count")
@partition_count.setter
def partition_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "partition_count", value)
@property
@pulumi.getter(name="retentionTimeInDays")
def retention_time_in_days(self) -> Optional[pulumi.Input[float]]:
"""
The retention time for device-to-cloud messages in days. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages
"""
return pulumi.get(self, "retention_time_in_days")
@retention_time_in_days.setter
def retention_time_in_days(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "retention_time_in_days", value)
@pulumi.input_type
class FeedbackPropertiesArgs:
def __init__(__self__, *,
lock_duration_as_iso8601: Optional[pulumi.Input[str]] = None,
max_delivery_count: Optional[pulumi.Input[int]] = None,
ttl_as_iso8601: Optional[pulumi.Input[str]] = None):
"""
The properties of the feedback queue for cloud-to-device messages.
:param pulumi.Input[str] lock_duration_as_iso8601: The lock duration for the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:param pulumi.Input[int] max_delivery_count: The number of times the IoT hub attempts to deliver a message on the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:param pulumi.Input[str] ttl_as_iso8601: The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
if lock_duration_as_iso8601 is not None:
pulumi.set(__self__, "lock_duration_as_iso8601", lock_duration_as_iso8601)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if ttl_as_iso8601 is not None:
pulumi.set(__self__, "ttl_as_iso8601", ttl_as_iso8601)
@property
@pulumi.getter(name="lockDurationAsIso8601")
def lock_duration_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The lock duration for the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "lock_duration_as_iso8601")
@lock_duration_as_iso8601.setter
def lock_duration_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lock_duration_as_iso8601", value)
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of times the IoT hub attempts to deliver a message on the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "max_delivery_count")
@max_delivery_count.setter
def max_delivery_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_delivery_count", value)
@property
@pulumi.getter(name="ttlAsIso8601")
def ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "ttl_as_iso8601")
@ttl_as_iso8601.setter
def ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl_as_iso8601", value)
@pulumi.input_type
class IotHubPropertiesArgs:
def __init__(__self__, *,
authorization_policies: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]] = None,
cloud_to_device: Optional[pulumi.Input['CloudToDevicePropertiesArgs']] = None,
comments: Optional[pulumi.Input[str]] = None,
enable_file_upload_notifications: Optional[pulumi.Input[bool]] = None,
event_hub_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]] = None,
features: Optional[pulumi.Input[Union[str, 'Capabilities']]] = None,
ip_filter_rules: Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]] = None,
messaging_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]] = None,
operations_monitoring_properties: Optional[pulumi.Input['OperationsMonitoringPropertiesArgs']] = None,
storage_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]]] = None):
"""
The properties of an IoT hub.
:param pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]] authorization_policies: The shared access policies you can use to secure a connection to the IoT hub.
:param pulumi.Input['CloudToDevicePropertiesArgs'] cloud_to_device: The IoT hub cloud-to-device messaging properties.
:param pulumi.Input[str] comments: Comments.
:param pulumi.Input[bool] enable_file_upload_notifications: If True, file upload notifications are enabled.
:param pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]] event_hub_endpoints: The Event Hub-compatible endpoint properties. The possible keys to this dictionary are events and operationsMonitoringEvents. Both of these keys have to be present in the dictionary while making create or update calls for the IoT hub.
:param pulumi.Input[Union[str, 'Capabilities']] features: The capabilities and features enabled for the IoT hub.
:param pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]] ip_filter_rules: The IP filter rules.
:param pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]] messaging_endpoints: The messaging endpoint properties for the file upload notification queue.
:param pulumi.Input['OperationsMonitoringPropertiesArgs'] operations_monitoring_properties: The operations monitoring properties for the IoT hub. The possible keys to the dictionary are Connections, DeviceTelemetry, C2DCommands, DeviceIdentityOperations, FileUploadOperations.
        :param pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]] storage_endpoints: The list of Azure Storage endpoints where you can upload files. Currently you can configure only one Azure Storage account and that MUST have its key as $default. Specifying more than one storage account causes an error to be thrown. Not specifying a value for this property when the enableFileUploadNotifications property is set to True causes an error to be thrown.
"""
if authorization_policies is not None:
pulumi.set(__self__, "authorization_policies", authorization_policies)
if cloud_to_device is not None:
pulumi.set(__self__, "cloud_to_device", cloud_to_device)
if comments is not None:
pulumi.set(__self__, "comments", comments)
if enable_file_upload_notifications is not None:
pulumi.set(__self__, "enable_file_upload_notifications", enable_file_upload_notifications)
if event_hub_endpoints is not None:
pulumi.set(__self__, "event_hub_endpoints", event_hub_endpoints)
if features is not None:
pulumi.set(__self__, "features", features)
if ip_filter_rules is not None:
pulumi.set(__self__, "ip_filter_rules", ip_filter_rules)
if messaging_endpoints is not None:
pulumi.set(__self__, "messaging_endpoints", messaging_endpoints)
if operations_monitoring_properties is not None:
pulumi.set(__self__, "operations_monitoring_properties", operations_monitoring_properties)
if storage_endpoints is not None:
pulumi.set(__self__, "storage_endpoints", storage_endpoints)
@property
@pulumi.getter(name="authorizationPolicies")
def authorization_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]]:
"""
The shared access policies you can use to secure a connection to the IoT hub.
"""
return pulumi.get(self, "authorization_policies")
@authorization_policies.setter
def authorization_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]]):
pulumi.set(self, "authorization_policies", value)
@property
@pulumi.getter(name="cloudToDevice")
def cloud_to_device(self) -> Optional[pulumi.Input['CloudToDevicePropertiesArgs']]:
"""
The IoT hub cloud-to-device messaging properties.
"""
return pulumi.get(self, "cloud_to_device")
@cloud_to_device.setter
def cloud_to_device(self, value: Optional[pulumi.Input['CloudToDevicePropertiesArgs']]):
pulumi.set(self, "cloud_to_device", value)
@property
@pulumi.getter
def comments(self) -> Optional[pulumi.Input[str]]:
"""
Comments.
"""
return pulumi.get(self, "comments")
@comments.setter
def comments(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comments", value)
@property
@pulumi.getter(name="enableFileUploadNotifications")
def enable_file_upload_notifications(self) -> Optional[pulumi.Input[bool]]:
"""
If True, file upload notifications are enabled.
"""
return pulumi.get(self, "enable_file_upload_notifications")
@enable_file_upload_notifications.setter
def enable_file_upload_notifications(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_file_upload_notifications", value)
@property
@pulumi.getter(name="eventHubEndpoints")
def event_hub_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]]:
"""
The Event Hub-compatible endpoint properties. The possible keys to this dictionary are events and operationsMonitoringEvents. Both of these keys have to be present in the dictionary while making create or update calls for the IoT hub.
"""
return pulumi.get(self, "event_hub_endpoints")
@event_hub_endpoints.setter
def event_hub_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]]):
pulumi.set(self, "event_hub_endpoints", value)
@property
@pulumi.getter
def features(self) -> Optional[pulumi.Input[Union[str, 'Capabilities']]]:
"""
The capabilities and features enabled for the IoT hub.
"""
return pulumi.get(self, "features")
@features.setter
def features(self, value: Optional[pulumi.Input[Union[str, 'Capabilities']]]):
pulumi.set(self, "features", value)
@property
@pulumi.getter(name="ipFilterRules")
def ip_filter_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]]:
"""
The IP filter rules.
"""
return pulumi.get(self, "ip_filter_rules")
@ip_filter_rules.setter
def ip_filter_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]]):
pulumi.set(self, "ip_filter_rules", value)
@property
@pulumi.getter(name="messagingEndpoints")
def messaging_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]]:
"""
The messaging endpoint properties for the file upload notification queue.
"""
return pulumi.get(self, "messaging_endpoints")
@messaging_endpoints.setter
def messaging_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]]):
pulumi.set(self, "messaging_endpoints", value)
@property
@pulumi.getter(name="operationsMonitoringProperties")
def operations_monitoring_properties(self) -> Optional[pulumi.Input['OperationsMonitoringPropertiesArgs']]:
"""
The operations monitoring properties for the IoT hub. The possible keys to the dictionary are Connections, DeviceTelemetry, C2DCommands, DeviceIdentityOperations, FileUploadOperations.
"""
return pulumi.get(self, "operations_monitoring_properties")
@operations_monitoring_properties.setter
def operations_monitoring_properties(self, value: Optional[pulumi.Input['OperationsMonitoringPropertiesArgs']]):
pulumi.set(self, "operations_monitoring_properties", value)
@property
@pulumi.getter(name="storageEndpoints")
def storage_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]]]:
"""
        The list of Azure Storage endpoints where you can upload files. Currently you can configure only one Azure Storage account and that MUST have its key as $default. Specifying more than one storage account causes an error to be thrown. Not specifying a value for this property when the enableFileUploadNotifications property is set to True causes an error to be thrown.
"""
return pulumi.get(self, "storage_endpoints")
@storage_endpoints.setter
def storage_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]]]):
pulumi.set(self, "storage_endpoints", value)
@pulumi.input_type
class IotHubSkuInfoArgs:
def __init__(__self__, *,
capacity: pulumi.Input[float],
name: pulumi.Input[Union[str, 'IotHubSku']]):
"""
Information about the SKU of the IoT hub.
:param pulumi.Input[float] capacity: The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:param pulumi.Input[Union[str, 'IotHubSku']] name: The name of the SKU.
"""
pulumi.set(__self__, "capacity", capacity)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def capacity(self) -> pulumi.Input[float]:
"""
The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: pulumi.Input[float]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[Union[str, 'IotHubSku']]:
"""
The name of the SKU.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[Union[str, 'IotHubSku']]):
pulumi.set(self, "name", value)
@pulumi.input_type
class IpFilterRuleArgs:
def __init__(__self__, *,
action: pulumi.Input['IpFilterActionType'],
filter_name: pulumi.Input[str],
ip_mask: pulumi.Input[str]):
"""
The IP filter rules for the IoT hub.
:param pulumi.Input['IpFilterActionType'] action: The desired action for requests captured by this rule.
:param pulumi.Input[str] filter_name: The name of the IP filter rule.
:param pulumi.Input[str] ip_mask: A string that contains the IP address range in CIDR notation for the rule.
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "filter_name", filter_name)
pulumi.set(__self__, "ip_mask", ip_mask)
@property
@pulumi.getter
def action(self) -> pulumi.Input['IpFilterActionType']:
"""
The desired action for requests captured by this rule.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input['IpFilterActionType']):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="filterName")
def filter_name(self) -> pulumi.Input[str]:
"""
The name of the IP filter rule.
"""
return pulumi.get(self, "filter_name")
@filter_name.setter
def filter_name(self, value: pulumi.Input[str]):
pulumi.set(self, "filter_name", value)
@property
@pulumi.getter(name="ipMask")
def ip_mask(self) -> pulumi.Input[str]:
"""
A string that contains the IP address range in CIDR notation for the rule.
"""
return pulumi.get(self, "ip_mask")
@ip_mask.setter
def ip_mask(self, value: pulumi.Input[str]):
pulumi.set(self, "ip_mask", value)
@pulumi.input_type
class MessagingEndpointPropertiesArgs:
def __init__(__self__, *,
lock_duration_as_iso8601: Optional[pulumi.Input[str]] = None,
max_delivery_count: Optional[pulumi.Input[int]] = None,
ttl_as_iso8601: Optional[pulumi.Input[str]] = None):
"""
The properties of the messaging endpoints used by this IoT hub.
:param pulumi.Input[str] lock_duration_as_iso8601: The lock duration. See: https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-file-upload.
:param pulumi.Input[int] max_delivery_count: The number of times the IoT hub attempts to deliver a message. See: https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-file-upload.
:param pulumi.Input[str] ttl_as_iso8601: The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-file-upload.
"""
if lock_duration_as_iso8601 is not None:
pulumi.set(__self__, "lock_duration_as_iso8601", lock_duration_as_iso8601)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if ttl_as_iso8601 is not None:
pulumi.set(__self__, "ttl_as_iso8601", ttl_as_iso8601)
@property
@pulumi.getter(name="lockDurationAsIso8601")
def lock_duration_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The lock duration. See: https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-file-upload.
"""
return pulumi.get(self, "lock_duration_as_iso8601")
@lock_duration_as_iso8601.setter
def lock_duration_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lock_duration_as_iso8601", value)
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of times the IoT hub attempts to deliver a message. See: https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-file-upload.
"""
return pulumi.get(self, "max_delivery_count")
@max_delivery_count.setter
def max_delivery_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_delivery_count", value)
@property
@pulumi.getter(name="ttlAsIso8601")
def ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-file-upload.
"""
return pulumi.get(self, "ttl_as_iso8601")
@ttl_as_iso8601.setter
def ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl_as_iso8601", value)
@pulumi.input_type
class OperationsMonitoringPropertiesArgs:
def __init__(__self__, *,
events: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union[str, 'OperationMonitoringLevel']]]]] = None):
"""
The operations monitoring properties for the IoT hub. The possible keys to the dictionary are Connections, DeviceTelemetry, C2DCommands, DeviceIdentityOperations, FileUploadOperations.
"""
if events is not None:
pulumi.set(__self__, "events", events)
@property
@pulumi.getter
def events(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Union[str, 'OperationMonitoringLevel']]]]]:
return pulumi.get(self, "events")
@events.setter
def events(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union[str, 'OperationMonitoringLevel']]]]]):
pulumi.set(self, "events", value)
@pulumi.input_type
class SharedAccessSignatureAuthorizationRuleArgs:
def __init__(__self__, *,
key_name: pulumi.Input[str],
rights: pulumi.Input['AccessRights'],
primary_key: Optional[pulumi.Input[str]] = None,
secondary_key: Optional[pulumi.Input[str]] = None):
"""
The properties of an IoT hub shared access policy.
:param pulumi.Input[str] key_name: The name of the shared access policy.
:param pulumi.Input['AccessRights'] rights: The permissions assigned to the shared access policy.
:param pulumi.Input[str] primary_key: The primary key.
:param pulumi.Input[str] secondary_key: The secondary key.
"""
pulumi.set(__self__, "key_name", key_name)
pulumi.set(__self__, "rights", rights)
if primary_key is not None:
pulumi.set(__self__, "primary_key", primary_key)
if secondary_key is not None:
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> pulumi.Input[str]:
"""
The name of the shared access policy.
"""
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: pulumi.Input[str]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter
def rights(self) -> pulumi.Input['AccessRights']:
"""
The permissions assigned to the shared access policy.
"""
return pulumi.get(self, "rights")
@rights.setter
def rights(self, value: pulumi.Input['AccessRights']):
pulumi.set(self, "rights", value)
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> Optional[pulumi.Input[str]]:
"""
The primary key.
"""
return pulumi.get(self, "primary_key")
@primary_key.setter
def primary_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_key", value)
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> Optional[pulumi.Input[str]]:
"""
The secondary key.
"""
return pulumi.get(self, "secondary_key")
@secondary_key.setter
def secondary_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secondary_key", value)
@pulumi.input_type
class StorageEndpointPropertiesArgs:
def __init__(__self__, *,
connection_string: pulumi.Input[str],
container_name: pulumi.Input[str],
sas_ttl_as_iso8601: Optional[pulumi.Input[str]] = None):
"""
The properties of the Azure Storage endpoint for file upload.
:param pulumi.Input[str] connection_string: The connection string for the Azure Storage account to which files are uploaded.
:param pulumi.Input[str] container_name: The name of the root container where you upload files. The container need not exist but should be creatable using the connectionString specified.
:param pulumi.Input[str] sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for file upload is valid. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
"""
pulumi.set(__self__, "connection_string", connection_string)
pulumi.set(__self__, "container_name", container_name)
if sas_ttl_as_iso8601 is not None:
pulumi.set(__self__, "sas_ttl_as_iso8601", sas_ttl_as_iso8601)
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> pulumi.Input[str]:
"""
The connection string for the Azure Storage account to which files are uploaded.
"""
return pulumi.get(self, "connection_string")
@connection_string.setter
def connection_string(self, value: pulumi.Input[str]):
pulumi.set(self, "connection_string", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Input[str]:
"""
The name of the root container where you upload files. The container need not exist but should be creatable using the connectionString specified.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: pulumi.Input[str]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="sasTtlAsIso8601")
def sas_ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
"""
The period of time for which the SAS URI generated by IoT Hub for file upload is valid. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
"""
return pulumi.get(self, "sas_ttl_as_iso8601")
@sas_ttl_as_iso8601.setter
def sas_ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sas_ttl_as_iso8601", value)
| 48.22956
| 476
| 0.694301
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'CloudToDevicePropertiesArgs',
'EventHubPropertiesArgs',
'FeedbackPropertiesArgs',
'IotHubPropertiesArgs',
'IotHubSkuInfoArgs',
'IpFilterRuleArgs',
'MessagingEndpointPropertiesArgs',
'OperationsMonitoringPropertiesArgs',
'SharedAccessSignatureAuthorizationRuleArgs',
'StorageEndpointPropertiesArgs',
]
@pulumi.input_type
class CloudToDevicePropertiesArgs:
def __init__(__self__, *,
default_ttl_as_iso8601: Optional[pulumi.Input[str]] = None,
feedback: Optional[pulumi.Input['FeedbackPropertiesArgs']] = None,
max_delivery_count: Optional[pulumi.Input[int]] = None):
if default_ttl_as_iso8601 is not None:
pulumi.set(__self__, "default_ttl_as_iso8601", default_ttl_as_iso8601)
if feedback is not None:
pulumi.set(__self__, "feedback", feedback)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
@property
@pulumi.getter(name="defaultTtlAsIso8601")
def default_ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "default_ttl_as_iso8601")
@default_ttl_as_iso8601.setter
def default_ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_ttl_as_iso8601", value)
@property
@pulumi.getter
def feedback(self) -> Optional[pulumi.Input['FeedbackPropertiesArgs']]:
return pulumi.get(self, "feedback")
@feedback.setter
def feedback(self, value: Optional[pulumi.Input['FeedbackPropertiesArgs']]):
pulumi.set(self, "feedback", value)
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_delivery_count")
@max_delivery_count.setter
def max_delivery_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_delivery_count", value)
@pulumi.input_type
class EventHubPropertiesArgs:
def __init__(__self__, *,
partition_count: Optional[pulumi.Input[int]] = None,
retention_time_in_days: Optional[pulumi.Input[float]] = None):
if partition_count is not None:
pulumi.set(__self__, "partition_count", partition_count)
if retention_time_in_days is not None:
pulumi.set(__self__, "retention_time_in_days", retention_time_in_days)
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "partition_count")
@partition_count.setter
def partition_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "partition_count", value)
@property
@pulumi.getter(name="retentionTimeInDays")
def retention_time_in_days(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "retention_time_in_days")
@retention_time_in_days.setter
def retention_time_in_days(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "retention_time_in_days", value)
@pulumi.input_type
class FeedbackPropertiesArgs:
def __init__(__self__, *,
lock_duration_as_iso8601: Optional[pulumi.Input[str]] = None,
max_delivery_count: Optional[pulumi.Input[int]] = None,
ttl_as_iso8601: Optional[pulumi.Input[str]] = None):
if lock_duration_as_iso8601 is not None:
pulumi.set(__self__, "lock_duration_as_iso8601", lock_duration_as_iso8601)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if ttl_as_iso8601 is not None:
pulumi.set(__self__, "ttl_as_iso8601", ttl_as_iso8601)
@property
@pulumi.getter(name="lockDurationAsIso8601")
def lock_duration_as_iso8601(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "lock_duration_as_iso8601")
@lock_duration_as_iso8601.setter
def lock_duration_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lock_duration_as_iso8601", value)
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_delivery_count")
@max_delivery_count.setter
def max_delivery_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_delivery_count", value)
@property
@pulumi.getter(name="ttlAsIso8601")
def ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ttl_as_iso8601")
@ttl_as_iso8601.setter
def ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl_as_iso8601", value)
@pulumi.input_type
class IotHubPropertiesArgs:
def __init__(__self__, *,
authorization_policies: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]] = None,
cloud_to_device: Optional[pulumi.Input['CloudToDevicePropertiesArgs']] = None,
comments: Optional[pulumi.Input[str]] = None,
enable_file_upload_notifications: Optional[pulumi.Input[bool]] = None,
event_hub_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]] = None,
features: Optional[pulumi.Input[Union[str, 'Capabilities']]] = None,
ip_filter_rules: Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]] = None,
messaging_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]] = None,
operations_monitoring_properties: Optional[pulumi.Input['OperationsMonitoringPropertiesArgs']] = None,
storage_endpoints: Optional[pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]]] = None):
if authorization_policies is not None:
pulumi.set(__self__, "authorization_policies", authorization_policies)
if cloud_to_device is not None:
pulumi.set(__self__, "cloud_to_device", cloud_to_device)
if comments is not None:
pulumi.set(__self__, "comments", comments)
if enable_file_upload_notifications is not None:
pulumi.set(__self__, "enable_file_upload_notifications", enable_file_upload_notifications)
if event_hub_endpoints is not None:
pulumi.set(__self__, "event_hub_endpoints", event_hub_endpoints)
if features is not None:
pulumi.set(__self__, "features", features)
if ip_filter_rules is not None:
pulumi.set(__self__, "ip_filter_rules", ip_filter_rules)
if messaging_endpoints is not None:
pulumi.set(__self__, "messaging_endpoints", messaging_endpoints)
if operations_monitoring_properties is not None:
pulumi.set(__self__, "operations_monitoring_properties", operations_monitoring_properties)
if storage_endpoints is not None:
pulumi.set(__self__, "storage_endpoints", storage_endpoints)
@property
@pulumi.getter(name="authorizationPolicies")
def authorization_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]]:
return pulumi.get(self, "authorization_policies")
@authorization_policies.setter
def authorization_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessSignatureAuthorizationRuleArgs']]]]):
pulumi.set(self, "authorization_policies", value)
@property
@pulumi.getter(name="cloudToDevice")
def cloud_to_device(self) -> Optional[pulumi.Input['CloudToDevicePropertiesArgs']]:
return pulumi.get(self, "cloud_to_device")
@cloud_to_device.setter
def cloud_to_device(self, value: Optional[pulumi.Input['CloudToDevicePropertiesArgs']]):
pulumi.set(self, "cloud_to_device", value)
@property
@pulumi.getter
def comments(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "comments")
@comments.setter
def comments(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comments", value)
@property
@pulumi.getter(name="enableFileUploadNotifications")
def enable_file_upload_notifications(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_file_upload_notifications")
@enable_file_upload_notifications.setter
def enable_file_upload_notifications(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_file_upload_notifications", value)
@property
@pulumi.getter(name="eventHubEndpoints")
def event_hub_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]]:
return pulumi.get(self, "event_hub_endpoints")
@event_hub_endpoints.setter
def event_hub_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['EventHubPropertiesArgs']]]]):
pulumi.set(self, "event_hub_endpoints", value)
@property
@pulumi.getter
def features(self) -> Optional[pulumi.Input[Union[str, 'Capabilities']]]:
return pulumi.get(self, "features")
@features.setter
def features(self, value: Optional[pulumi.Input[Union[str, 'Capabilities']]]):
pulumi.set(self, "features", value)
@property
@pulumi.getter(name="ipFilterRules")
def ip_filter_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]]:
return pulumi.get(self, "ip_filter_rules")
@ip_filter_rules.setter
def ip_filter_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpFilterRuleArgs']]]]):
pulumi.set(self, "ip_filter_rules", value)
@property
@pulumi.getter(name="messagingEndpoints")
def messaging_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]]:
return pulumi.get(self, "messaging_endpoints")
@messaging_endpoints.setter
def messaging_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['MessagingEndpointPropertiesArgs']]]]):
pulumi.set(self, "messaging_endpoints", value)
@property
@pulumi.getter(name="operationsMonitoringProperties")
def operations_monitoring_properties(self) -> Optional[pulumi.Input['OperationsMonitoringPropertiesArgs']]:
return pulumi.get(self, "operations_monitoring_properties")
@operations_monitoring_properties.setter
def operations_monitoring_properties(self, value: Optional[pulumi.Input['OperationsMonitoringPropertiesArgs']]):
pulumi.set(self, "operations_monitoring_properties", value)
@property
@pulumi.getter(name="storageEndpoints")
def storage_endpoints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]]]:
return pulumi.get(self, "storage_endpoints")
@storage_endpoints.setter
def storage_endpoints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['StorageEndpointPropertiesArgs']]]]):
pulumi.set(self, "storage_endpoints", value)
@pulumi.input_type
class IotHubSkuInfoArgs:
def __init__(__self__, *,
capacity: pulumi.Input[float],
name: pulumi.Input[Union[str, 'IotHubSku']]):
pulumi.set(__self__, "capacity", capacity)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def capacity(self) -> pulumi.Input[float]:
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: pulumi.Input[float]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[Union[str, 'IotHubSku']]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[Union[str, 'IotHubSku']]):
pulumi.set(self, "name", value)
@pulumi.input_type
class IpFilterRuleArgs:
def __init__(__self__, *,
action: pulumi.Input['IpFilterActionType'],
filter_name: pulumi.Input[str],
ip_mask: pulumi.Input[str]):
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "filter_name", filter_name)
pulumi.set(__self__, "ip_mask", ip_mask)
@property
@pulumi.getter
def action(self) -> pulumi.Input['IpFilterActionType']:
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input['IpFilterActionType']):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="filterName")
def filter_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "filter_name")
@filter_name.setter
def filter_name(self, value: pulumi.Input[str]):
pulumi.set(self, "filter_name", value)
@property
@pulumi.getter(name="ipMask")
def ip_mask(self) -> pulumi.Input[str]:
return pulumi.get(self, "ip_mask")
@ip_mask.setter
def ip_mask(self, value: pulumi.Input[str]):
pulumi.set(self, "ip_mask", value)
@pulumi.input_type
class MessagingEndpointPropertiesArgs:
def __init__(__self__, *,
lock_duration_as_iso8601: Optional[pulumi.Input[str]] = None,
max_delivery_count: Optional[pulumi.Input[int]] = None,
ttl_as_iso8601: Optional[pulumi.Input[str]] = None):
if lock_duration_as_iso8601 is not None:
pulumi.set(__self__, "lock_duration_as_iso8601", lock_duration_as_iso8601)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if ttl_as_iso8601 is not None:
pulumi.set(__self__, "ttl_as_iso8601", ttl_as_iso8601)
@property
@pulumi.getter(name="lockDurationAsIso8601")
def lock_duration_as_iso8601(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "lock_duration_as_iso8601")
@lock_duration_as_iso8601.setter
def lock_duration_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lock_duration_as_iso8601", value)
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_delivery_count")
@max_delivery_count.setter
def max_delivery_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_delivery_count", value)
@property
@pulumi.getter(name="ttlAsIso8601")
def ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ttl_as_iso8601")
@ttl_as_iso8601.setter
def ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl_as_iso8601", value)
@pulumi.input_type
class OperationsMonitoringPropertiesArgs:
def __init__(__self__, *,
events: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union[str, 'OperationMonitoringLevel']]]]] = None):
if events is not None:
pulumi.set(__self__, "events", events)
@property
@pulumi.getter
def events(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Union[str, 'OperationMonitoringLevel']]]]]:
return pulumi.get(self, "events")
@events.setter
def events(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Union[str, 'OperationMonitoringLevel']]]]]):
pulumi.set(self, "events", value)
@pulumi.input_type
class SharedAccessSignatureAuthorizationRuleArgs:
def __init__(__self__, *,
key_name: pulumi.Input[str],
rights: pulumi.Input['AccessRights'],
primary_key: Optional[pulumi.Input[str]] = None,
secondary_key: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "key_name", key_name)
pulumi.set(__self__, "rights", rights)
if primary_key is not None:
pulumi.set(__self__, "primary_key", primary_key)
if secondary_key is not None:
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: pulumi.Input[str]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter
def rights(self) -> pulumi.Input['AccessRights']:
return pulumi.get(self, "rights")
@rights.setter
def rights(self, value: pulumi.Input['AccessRights']):
pulumi.set(self, "rights", value)
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "primary_key")
@primary_key.setter
def primary_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_key", value)
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "secondary_key")
@secondary_key.setter
def secondary_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secondary_key", value)
@pulumi.input_type
class StorageEndpointPropertiesArgs:
def __init__(__self__, *,
connection_string: pulumi.Input[str],
container_name: pulumi.Input[str],
sas_ttl_as_iso8601: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "connection_string", connection_string)
pulumi.set(__self__, "container_name", container_name)
if sas_ttl_as_iso8601 is not None:
pulumi.set(__self__, "sas_ttl_as_iso8601", sas_ttl_as_iso8601)
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> pulumi.Input[str]:
return pulumi.get(self, "connection_string")
@connection_string.setter
def connection_string(self, value: pulumi.Input[str]):
pulumi.set(self, "connection_string", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: pulumi.Input[str]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="sasTtlAsIso8601")
def sas_ttl_as_iso8601(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sas_ttl_as_iso8601")
@sas_ttl_as_iso8601.setter
def sas_ttl_as_iso8601(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sas_ttl_as_iso8601", value)
| true
| true
|
1c413dc2c3f76f7b654b39a45fcf9e5d7245fa97
| 7,102
|
py
|
Python
|
releasenotes/source/conf.py
|
openstack/python-saharaclient
|
2f01b878a9e07bc712fae9c6c2c5f823bd986dd6
|
[
"Apache-2.0"
] | 34
|
2015-01-26T21:39:46.000Z
|
2021-01-16T17:30:25.000Z
|
releasenotes/source/conf.py
|
openstack/python-saharaclient
|
2f01b878a9e07bc712fae9c6c2c5f823bd986dd6
|
[
"Apache-2.0"
] | null | null | null |
releasenotes/source/conf.py
|
openstack/python-saharaclient
|
2f01b878a9e07bc712fae9c6c2c5f823bd986dd6
|
[
"Apache-2.0"
] | 15
|
2015-03-13T23:24:59.000Z
|
2017-06-22T12:15:46.000Z
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sahara Client Release Notes documentation build configuration file
extensions = [
'reno.sphinxext',
'openstackdocstheme'
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/python-saharaclient'
openstackdocs_use_storyboard = True
openstackdocs_auto_name = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Saharaclient Release Notes'
copyright = u'2015, Sahara Developers'
# Release notes are version independent.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SaharaClientReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SaharaClientReleaseNotes.tex',
u'Sahara Client Release Notes Documentation',
u'Sahara Client Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'saharaclientreleasenotes',
u'Sahara Client Release Notes Documentation',
[u'Sahara Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SaharaClientReleaseNotes',
u'Sahara Client Release Notes Documentation',
u'Sahara Developers', 'SaharaClientReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
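# Usage note (a sketch assuming the usual reno/openstackdocstheme layout; the
# authoritative invocation lives in the project's tox.ini): release notes
# built from this configuration are typically rendered with something like
#   sphinx-build -a -E -W -d releasenotes/build/doctrees \
#       -b html releasenotes/source releasenotes/build/html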
| 31.847534
| 79
| 0.714869
|
extensions = [
'reno.sphinxext',
'openstackdocstheme'
]
openstackdocs_repo_name = 'openstack/python-saharaclient'
openstackdocs_use_storyboard = True
openstackdocs_auto_name = False
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Saharaclient Release Notes'
copyright = u'2015, Sahara Developers'
release = ''
version = ''
exclude_patterns = []
pygments_style = 'native'
html_theme = 'openstackdocs'
html_static_path = ['_static']
htmlhelp_basename = 'SaharaClientReleaseNotesdoc'
latex_elements = {
}
latex_documents = [
('index', 'SaharaClientReleaseNotes.tex',
u'Sahara Client Release Notes Documentation',
u'Sahara Client Developers', 'manual'),
]
man_pages = [
('index', 'saharaclientreleasenotes',
u'Sahara Client Release Notes Documentation',
[u'Sahara Developers'], 1)
]
texinfo_documents = [
('index', 'SaharaClientReleaseNotes',
u'Sahara Client Release Notes Documentation',
u'Sahara Developers', 'SaharaClientReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| true
| true
|
1c413deb381bf591a0b5a224de88ea590d6f46df
| 3,829
|
py
|
Python
|
scripts/MDCS_UC.py
|
Esri/mdcs-py
|
8ecaf9b5cda5c33537cae4a21809b12894598cba
|
[
"Apache-2.0"
] | 53
|
2015-02-04T22:42:18.000Z
|
2022-03-10T20:19:31.000Z
|
scripts/MDCS_UC.py
|
Esri/mdcs-py
|
8ecaf9b5cda5c33537cae4a21809b12894598cba
|
[
"Apache-2.0"
] | 40
|
2015-07-22T21:52:20.000Z
|
2022-03-29T09:02:59.000Z
|
scripts/MDCS_UC.py
|
Esri/mdcs-py
|
8ecaf9b5cda5c33537cae4a21809b12894598cba
|
[
"Apache-2.0"
] | 25
|
2015-02-04T22:43:40.000Z
|
2021-12-19T10:50:23.000Z
|
# ------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# Name: MDCS_UC.py
# Description: A class to implement all user functions or to extend the built in MDCS functions/commands chain.
# Version: 20201230
# Requirements: ArcGIS 10.1 SP1
# Author: Esri Imagery Workflows team
# ------------------------------------------------------------------------------
#!/usr/bin/env python
import os
import sys
import arcpy
class UserCode:
def __init__(self):
pass # initialize variables that need to be shared between multiple user commands.
def sample00(self, data):
        base = data['base']  # using Base class for its XML-specific common functions (getXMLXPathValue, getXMLNodeValue, getXMLNode)
xmlDOM = data['mdcs'] # access to MDCS config file
command_used = base.getXMLNodeValue(xmlDOM, 'Command')
workspace = data['workspace']
md = data['mosaicdataset']
log = data['log']
log.Message('%s\\%s' % (workspace, md), 0)
return True
def sample01(self, data):
log = data['log'] # How to use logging within the user function.
log.Message('hello world', 0)
return True
def sample02(self, data):
log = data['log'] # How to use logging within the user function.
log.Message('Returning multiple values', 0)
data['useResponse'] = True
data['response'] = ['msg0', 'msg1', 'msg2']
data['status'] = True # overall function status
return True # True must be returned if data['useResponse'] is required. data['response'] can be used to return multiple values.
def customCV(self, data):
workspace = data['workspace']
md = data['mosaicdataset']
ds = os.path.join(workspace, md)
        # the legacy arcpy.UpdateCursor is used because the row objects below
        # rely on getValue/setValue, which arcpy.da cursors do not expose
        ds_cursor = arcpy.UpdateCursor(ds)
        if (ds_cursor is not None):
            print('Calculating values..')
row = ds_cursor.next()
while(row is not None):
row.setValue('MinPS', 0)
row.setValue('MaxPS', 300)
WRS_Path = row.getValue('WRS_Path')
WRS_Row = row.getValue('WRS_Row')
if (WRS_Path is not None and
WRS_Row is not None):
PR = (WRS_Path * 1000) + WRS_Row
row.setValue('PR', PR)
AcquisitionData = row.getValue('AcquisitionDate')
if (AcquisitionData is not None):
AcquisitionData = str(AcquisitionData).replace('-', '/')
                    month = int(AcquisitionData.split()[0].split('/')[1])  # 'YYYY/MM/DD' -> MM
                    row.setValue('Month', month)
grp_name = row.getValue('GroupName')
if (grp_name is not None):
CMAX_INDEX = 16
if (len(grp_name) >= CMAX_INDEX):
row.setValue('DayOfYear', int(grp_name[13:CMAX_INDEX]))
row.setValue('Name', grp_name.split('_')[0] + '_' + row.getValue('Tag'))
ds_cursor.updateRow(row)
row = ds_cursor.next()
del ds_cursor
return True
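# Illustrative sketch (not part of the original workflow file): the derived
# values computed in customCV above, shown without arcpy. The scene ID is a
# hypothetical Landsat-style GroupName in which characters 13:16 hold the
# day of year.
if __name__ == '__main__':
    WRS_Path, WRS_Row = 38, 37
    PR = (WRS_Path * 1000) + WRS_Row      # -> 38037
    grp_name = 'LC80380372013123LGN01'
    day_of_year = int(grp_name[13:16])    # -> 123
    print(PR, day_of_year)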
| 44.011494
| 141
| 0.557848
|
import os
import sys
import arcpy
class UserCode:
def __init__(self):
pass
def sample00(self, data):
base = data['base']
xmlDOM = data['mdcs']
command_used = base.getXMLNodeValue(xmlDOM, 'Command')
workspace = data['workspace']
md = data['mosaicdataset']
log = data['log']
log.Message('%s\\%s' % (workspace, md), 0)
return True
def sample01(self, data):
log = data['log']
log.Message('hello world', 0)
return True
def sample02(self, data):
log = data['log']
log.Message('Returning multiple values', 0)
data['useResponse'] = True
data['response'] = ['msg0', 'msg1', 'msg2']
data['status'] = True
return True
def customCV(self, data):
workspace = data['workspace']
md = data['mosaicdataset']
ds = os.path.join(workspace, md)
        ds_cursor = arcpy.UpdateCursor(ds)
        if (ds_cursor is not None):
            print('Calculating values..')
row = ds_cursor.next()
while(row is not None):
row.setValue('MinPS', 0)
row.setValue('MaxPS', 300)
WRS_Path = row.getValue('WRS_Path')
WRS_Row = row.getValue('WRS_Row')
if (WRS_Path is not None and
WRS_Row is not None):
PR = (WRS_Path * 1000) + WRS_Row
row.setValue('PR', PR)
AcquisitionData = row.getValue('AcquisitionDate')
if (AcquisitionData is not None):
AcquisitionData = str(AcquisitionData).replace('-', '/')
                    month = int(AcquisitionData.split()[0].split('/')[1])
                    row.setValue('Month', month)
grp_name = row.getValue('GroupName')
if (grp_name is not None):
CMAX_INDEX = 16
if (len(grp_name) >= CMAX_INDEX):
row.setValue('DayOfYear', int(grp_name[13:CMAX_INDEX]))
row.setValue('Name', grp_name.split('_')[0] + '_' + row.getValue('Tag'))
ds_cursor.updateRow(row)
row = ds_cursor.next()
del ds_cursor
return True
| true
| true
|
1c413df0889fc65a6a492ace3be753b86f851dfe
| 3,720
|
py
|
Python
|
data/ImShow.py
|
ne1199/RobustAutoencoder
|
2d32750fa08a88fe19ecb3ede6d76e50ecceb91d
|
[
"MIT"
] | 1
|
2020-05-18T02:57:13.000Z
|
2020-05-18T02:57:13.000Z
|
data/ImShow.py
|
yli96/RobustAutoencoder
|
01de32f5d670280ca8f8f9a6f6d704930fc266b4
|
[
"MIT"
] | null | null | null |
data/ImShow.py
|
yli96/RobustAutoencoder
|
01de32f5d670280ca8f8f9a6f6d704930fc266b4
|
[
"MIT"
] | 1
|
2021-07-09T07:54:03.000Z
|
2021-07-09T07:54:03.000Z
|
import numpy as np
"""
This file comes from the MNIST dataset tutorial.
"""
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
    Source: http://deeplearning.net/tutorial/utilities.html#how-to-plot
    Transform an array with one flattened image per row into an array in
    which images are reshaped and laid out like tiles on a floor.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0] + tile_spacing[0]) * tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1] + tile_spacing[1]) * tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
        # colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in range(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8' if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(X[i], img_shape, tile_shape,
tile_spacing, scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in range(tile_shape[0]):
for tile_col in range(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(X[tile_row * tile_shape[1] + tile_col].reshape(img_shape))
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
out_array[
tile_row * (H+Hs): tile_row * (H + Hs) + H,
tile_col * (W+Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
return out_array
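# Minimal usage sketch (synthetic data; not part of the original tutorial
# file): tile 12 random 28x28 "images" into a 3x4 grid with 1-pixel spacing.
if __name__ == '__main__':
    X = np.random.rand(12, 28 * 28)
    tiled = tile_raster_images(X, img_shape=(28, 28), tile_shape=(3, 4),
                               tile_spacing=(1, 1))
    print(tiled.shape, tiled.dtype)  # (86, 115) uint8, ready for PIL/imshow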
| 40.434783
| 117
| 0.550538
|
import numpy as np
def scale_to_unit_interval(ndar, eps=1e-8):
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in range(4):
if X[i] is None:
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8' if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
else:
out_array[:, :, i] = tile_raster_images(X[i], img_shape, tile_shape,
tile_spacing, scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
H, W = img_shape
Hs, Ws = tile_spacing
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in range(tile_shape[0]):
for tile_col in range(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale_rows_to_unit_interval:
this_img = scale_to_unit_interval(X[tile_row * tile_shape[1] + tile_col].reshape(img_shape))
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
out_array[
tile_row * (H+Hs): tile_row * (H + Hs) + H,
tile_col * (W+Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
return out_array
| true
| true
|
1c413e7446013f7c9550b5712aa532c73dabe03e
| 62,599
|
py
|
Python
|
ipapi/base/ipt_loose_pipeline.py
|
tpmp-inra/ipso_cli
|
6de4097dcb1536b546d9e2cdeb61057e5f931537
|
[
"MIT"
] | null | null | null |
ipapi/base/ipt_loose_pipeline.py
|
tpmp-inra/ipso_cli
|
6de4097dcb1536b546d9e2cdeb61057e5f931537
|
[
"MIT"
] | 1
|
2020-11-10T15:23:41.000Z
|
2020-11-10T15:23:41.000Z
|
ipapi/base/ipt_loose_pipeline.py
|
tpmp-inra/ipso_cli
|
6de4097dcb1536b546d9e2cdeb61057e5f931537
|
[
"MIT"
] | null | null | null |
from ipapi.tools.folders import ipso_folders
from uuid import uuid4
import json
from datetime import datetime as dt
from timeit import default_timer as timer
import itertools
from typing import Union
import logging
import csv
import os
import numpy as np
from ipapi.base.ipt_abstract import IptParam, IptBase, IptParamHolder
from ipapi.base.ipt_functional import get_ipt_class
from ipapi.base import ip_common as ipc
from ipapi.base.ipt_strict_pipeline import IptStrictPipeline
from ipapi.base.ip_abstract import BaseImageProcessor
import ipapi.tools.error_holder as eh
from ipapi.tools.common_functions import format_time
from ipapi.tools.regions import RectangleRegion
logger = logging.getLogger(__name__)
last_script_version = "0.2.0.0"
class MosaicData(object):
def __init__(self, pipeline, enabled, images):
super().__init__()
self.enabled = enabled
self.images = images
if isinstance(self.images, str):
self.images = [
[i for i in line.split(",")] for line in self.images.split("\n")
]
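            # e.g. "source,mask\nexposure" -> [["source", "mask"], ["exposure"]]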
self.pipeline = pipeline
class PipelineSettings(IptParamHolder):
def __init__(self, pipeline, **kwargs):
self.update_feedback_items = []
super(PipelineSettings, self).__init__(**kwargs)
self.mosaic = MosaicData(
pipeline=pipeline,
enabled=kwargs.get("mosaic_enabled", kwargs.get("build_mosaic", True) == 1),
images=kwargs.get(
"mosaic_images", kwargs.get("mosaic_items", [["source", "mask"]])
),
)
def build_params(self):
self.add_checkbox(
name="show_tool_result",
desc="Show a result image for each tool",
default_value=1,
)
self.add_checkbox(
name="show_group_result",
desc="Show a result image for each group",
default_value=0,
)
self.add_checkbox(
name="debug_mode",
desc="Display debug images",
default_value=0,
hint="Display module's intermediary images",
)
self.add_checkbox(
name="allow_step_mosaics",
desc="Allow mosaics for steps",
default_value=1,
hint="If checked, some steps will return mosaics instead of single images",
)
self.add_checkbox(
name="show_source_image",
desc="Show source image/mask for each tool",
default_value=0,
)
self.add_checkbox(
name="tool_group_name_watermark",
desc="Add a watermark with the name of the generating source to each output image",
default_value=0,
)
self.add_combobox(
name="stop_on",
desc="Stop processing on error level",
default_value=eh.ERR_LVL_EXCEPTION,
values={i: eh.error_level_to_str(i) for i in [0, 10, 20, 30, 35, 40, 50]},
hint="If any error of the selected level or higher happens the process will halt",
)
def params_to_dict(
self,
include_input: bool = True,
include_output: bool = False,
include_neutral: bool = False,
):
dic = {}
for p in self.gizmos:
if (
(include_input and p.is_input)
or (include_output and p.is_output)
or (include_neutral and p.is_neutral)
):
dic[p.name] = p.value
dic["mosaic_enabled"] = self.mosaic.enabled
dic["mosaic_images"] = self.mosaic.images
return dic
def items(self):
return self.gizmos + [self.mosaic]
@property
def node_count(self):
return len(self.items())
class Node(object):
def __init__(self, **kwargs):
self.uuid = kwargs.get("uuid", str(uuid4()))
if not self.uuid:
self.uuid = str(uuid4())
self.parent = kwargs.get("parent")
self.last_result = {}
    def get_relevant_image(self, exclude_demo: bool = False):
        ri = None
        if not exclude_demo:
            # Prefer the demo image when one was produced and demos are allowed
            ri = self.last_result.get("demo_image", None)
        if ri is None:
            if self.output_type == ipc.IO_IMAGE:
                ri = self.last_result.get(
                    "image", np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
                )
            elif self.output_type == ipc.IO_MASK:
                ri = self.last_result.get(
                    "mask", np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
                )
            elif self.output_type in [ipc.IO_DATA, ipc.IO_ROI, ipc.IO_NONE]:
                ri = self.last_result.get(
                    "image",
                    self.last_result.get(
                        "mask", np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
                    ),
                )
            else:
                ri = np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
if self.root.parent.tool_group_name_watermark:
ri = ri.copy()
BaseImageProcessor.draw_text(
img=ri,
text=self.name,
fnt_color=ipc.C_WHITE,
background_color=ipc.C_BLACK,
)
return ri
def get_feedback_image(self, data: dict):
demo_image = data.get("demo_image", None)
if demo_image is not None:
fi = demo_image
else:
mask = data.get("mask", None)
image = data.get("image", None)
if (
mask is not None
and image is not None
and self.root.parent.allow_step_mosaics
):
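                # Lay the image and the mask side by side on a silver canvas,
                # with a 2-pixel border around each panel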
h = max(mask.shape[0], image.shape[0])
w = max(mask.shape[1], image.shape[1])
canvas = ipc.enclose_image(
a_cnv=np.full(
shape=(h + 4, w * 2 + 6, 3),
fill_value=ipc.C_SILVER,
dtype=np.uint8,
),
img=image,
rect=RectangleRegion(left=2, top=2, width=w, height=h),
)
fi = ipc.enclose_image(
a_cnv=canvas,
img=np.dstack((mask, mask, mask)),
rect=RectangleRegion(left=w + 4, top=2, width=w, height=h),
)
elif mask is not None:
fi = mask
elif image is not None:
fi = image
else:
fi = np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
if self.root.parent.tool_group_name_watermark:
fi = fi.copy()
BaseImageProcessor.draw_text(
img=fi,
text=self.name,
fnt_color=ipc.C_WHITE,
background_color=ipc.C_BLACK,
)
return fi
def do_call_back(
self,
call_back,
res,
msg,
data,
is_progress=True,
force_call_back=False,
**kwargs,
):
        if call_back is not None:
            call_back(
                eh.error_level_to_str(res),
                msg,
                data if (force_call_back or not self.root.parent.silent) else None,
                self.absolute_index + 1 if is_progress else -1,
                self.absolute_count if is_progress else -1,
            )
else:
if isinstance(res, int) and (res >= logging.WARNING):
eh.log_data(log_msg=msg, log_level=res, target_logger=logger)
md = np.array(self.root.parent.settings.mosaic.images)
wrapper = self.root.parent.wrapper
needed_images = wrapper.forced_storage_images_list
if isinstance(data, (GroupNode, ModuleNode)):
dn = data.name
if dn in md:
self.root.parent.stored_mosaic_images[dn] = self.get_relevant_image()
if dn in needed_images:
wrapper.store_image(
image=self.get_relevant_image(exclude_demo=True),
text=dn,
force_store=True,
)
elif isinstance(data, BaseImageProcessor):
for d in data.image_list:
if d["name"] in md:
self.root.parent.stored_mosaic_images[d["name"]] = d["image"]
self.root.parent.update_error_level(res)
@property
def root(self):
root = self
while root.parent is not None and not isinstance(root.parent, LoosePipeline):
root = root.parent
return root
@property
def absolute_index(self):
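        # Zero-based position of this node among nodes of the same kind
        # (groups or modules) in a pre-order walk; used for progress reporting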
if isinstance(self, GroupNode):
if isinstance(self.parent, LoosePipeline):
return self.absolute_count
lst = self.root.as_pivot_list(index=self, types=("groups"))
elif isinstance(self, ModuleNode):
lst = self.root.as_pivot_list(index=self, types=("modules"))
else:
return -2
return len(lst.get("before", ()))
@property
def absolute_count(self):
if isinstance(self, GroupNode):
return len(list(self.root.iter_items(types=("groups"))))
elif isinstance(self, ModuleNode):
return len(list(self.root.iter_items(types=("modules"))))
else:
return -2
@property
def stop_processing(self):
return self.root.parent.stop_processing
@stop_processing.setter
def stop_processing(self, value):
self.root.parent.stop_processing = value
@property
def is_module(self):
return isinstance(self, ModuleNode)
@property
def is_group(self):
return isinstance(self, GroupNode)
@property
def is_root(self):
return isinstance(self.parent, LoosePipeline)
class ModuleNode(Node):
def __init__(self, **kwargs):
Node.__init__(self, **kwargs)
self.enabled = kwargs.get("enabled", 1)
self.tool = kwargs.get("tool")
self.tool.owner = self
def _execute_standard(self, tool, call_back=None, target_module: str = ""):
res = {}
wrapper = self.root.parent.wrapper
if self.root.parent.show_source_image:
if wrapper is not None and wrapper.current_image is not None:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg="",
data={
"plant_name": wrapper.plant,
"name": f"{self.name} (source image)",
"image": wrapper.current_image,
"luid": wrapper.luid,
"data": {},
},
)
if wrapper is not None and wrapper.mask is not None:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg="",
data={
"plant_name": wrapper.plant,
"name": f"{self.name} (source mask)",
"image": wrapper.mask,
"luid": wrapper.luid,
"data": {},
},
)
if tool.process_wrapper(wrapper=wrapper):
# Get ROI
if self.output_type == ipc.IO_ROI:
func = getattr(tool, "generate_roi", None)
if callable(func):
roi = func(wrapper=wrapper)
if roi is not None:
res["roi"] = roi
if not wrapper.store_images:
wrapper.store_image(
image=roi.draw_to(
dst_img=wrapper.current_image,
line_width=max(4, wrapper.width // 200),
),
text=self.name,
force_store=True,
)
else:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f"Failed to generate ROI from {self.name}",
data=wrapper if self.root.parent.debug_mode else self,
)
# Get data
if hasattr(tool, "data_dict"):
res["data"] = tool.data_dict
# Get mask
if self.output_type == ipc.IO_MASK:
res["mask"] = tool.result
if tool.result is None:
self.do_call_back(
call_back=call_back,
res=logging.WARNING,
msg=f"Failed to generate mask from {self.name}",
data=None,
)
# Get image
if (
self.output_type in [ipc.IO_MASK, ipc.IO_NONE, ipc.IO_ROI]
and tool.demo_image is not None
):
res["image"] = tool.demo_image
elif self.output_type == ipc.IO_ROI:
res["image"] = wrapper.draw_rois(
img=wrapper.current_image, rois=[res["roi"]]
)
elif self.output_type == ipc.IO_DATA:
if tool.demo_image is not None:
res["image"] = tool.demo_image
else:
res["image"] = wrapper.current_image
elif self.output_type == ipc.IO_IMAGE and isinstance(tool.result, np.ndarray):
res["image"] = tool.result
# Get demo image
if tool.demo_image is not None:
res["demo_image"] = tool.demo_image
return res
def _execute_grid_search(self, call_back):
def inner_call_back(res, msg, data, step, total):
if call_back is not None:
call_back(
res,
msg,
data,
step,
total,
)
param_settings_list = [p.decode_grid_search_options() for p in self.tool.gizmos]
size = 1
for ps in param_settings_list:
if len(ps) > 0:
size *= len(ps)
inner_call_back(
res="GRID_SEARCH_START",
msg="",
data=None,
step=0,
total=size,
)
procs = list(itertools.product(*param_settings_list))
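        # One tool instance is built and run per point of the cartesian
        # product of every parameter's grid-search values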
keys = [p.name for p in self.tool.gizmos]
wrapper = self.root.parent.wrapper
for i, p in enumerate(procs):
res = self._execute_standard(
tool=self.tool.__class__(
**{k: (int(v) if str.isdigit(v) else v) for k, v in zip(keys, p)}
),
)
inner_call_back(
res="GRID_SEARCH_OK" if res else "GRID_SEARCH_NOK",
msg="Failed to process element",
data={
"plant_name": wrapper.plant,
"name": wrapper.short_name,
"image": self.get_feedback_image(res),
"data": res.get("data", {}),
"luid": wrapper.luid,
},
step=i + 1,
total=size,
)
inner_call_back(
res="GRID_SEARCH_END",
msg="",
data=None,
step=size,
total=size,
)
def execute(self, **kwargs):
call_back = kwargs.get("call_back", None)
target_module = kwargs.get("target_module", "")
grid_search_mode = kwargs.get("grid_search_mode", "")
wrapper = self.root.parent.wrapper
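        # last_result acts as a cache: the node is only re-executed after
        # invalidate() cleared it (e.g. when settings or the source image change)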
if not self.last_result:
if hasattr(self.tool, "output_path") and self.root.parent.image_output_path:
self.tool.output_path = self.root.parent.image_output_path
if target_module == self.uuid and grid_search_mode:
self._execute_grid_search(call_back=call_back)
self.last_result = {}
else:
before = timer()
self.last_result = self._execute_standard(
tool=self.tool,
call_back=call_back,
target_module=target_module,
)
if self.root.parent.debug_mode:
data = wrapper
elif self.root.parent.show_tool_result:
data = self
else:
data = None
if self.last_result:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Successfully processed {self.name} in {format_time(timer() - before)}",
data=data,
)
else:
if ipc.ToolFamily.ASSERT in self.tool.use_case:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f'Assertion "{self.tool.name}" failed for {self.name}',
data=wrapper
if self.root.parent.debug_mode or self.uuid == target_module
else self,
)
else:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f"Failed to process {self.name} in {format_time(timer() - before)}",
data=wrapper
if self.root.parent.debug_mode or self.uuid == target_module
else self,
)
return self.last_result
def invalidate(self):
self.last_result = {}
def copy(self, parent):
return ModuleNode(
parent=parent,
tool=self.tool,
enabled=self.enabled,
uuid=self.uuid,
)
def to_code(self, indent: int):
pass
def to_json(self):
return {
"node_type": "module",
"tool": self.tool.to_json(),
"enabled": self.enabled,
"uuid": self.uuid,
}
@classmethod
def from_json(cls, parent, json_data: dict):
if json_data["node_type"] != "module":
return None
tool = IptBase.from_json(json_data["tool"])
if isinstance(tool, Exception):
eh.log_data(
log_msg=f"Failed to load module: {repr(tool)}",
log_level=eh.ERR_LVL_EXCEPTION,
target_logger=logger,
)
elif isinstance(tool, IptBase):
return ModuleNode(
tool=tool,
parent=parent,
enabled=json_data["enabled"],
uuid=json_data["uuid"],
)
def sugar_name(self):
if self.tool.has_param("roi_name") and self.tool.get_value_of("roi_name"):
return f'{self.tool.name} {self.tool.get_value_of("roi_name")}'
elif self.tool.has_param("channel"):
return f'{self.tool.name} {self.tool.get_value_of("channel")}'
elif self.tool.name == "Morphology":
return f'{self.tool.name} {self.tool.get_value_of("morph_op")}'
elif self.tool.has_param("roi_names") and self.tool.get_value_of("roi_names"):
return f'{self.tool.name} {self.tool.get_value_of("roi_names")}'
else:
return self.tool.name
@property
def input_type(self):
if isinstance(self.tool, IptBase):
return self.tool.input_type
else:
return ipc.IO_NONE
@property
def output_type(self):
if isinstance(self.tool, IptBase):
return self.tool.output_type
else:
return ipc.IO_NONE
@property
def name(self):
sn = self.sugar_name()
nodes = [
node
for node in self.root.as_pivot_list(index=self, types=("modules",))["before"]
if node.sugar_name() == sn
]
return sn if len(nodes) == 0 else f"{sn} ({len(nodes)})"
class GroupNode(Node):
default_execution_filters = {
k: "" for k in ["experiment", "plant", "date", "time", "camera", "view_option"]
}
def __init__(self, **kwargs):
Node.__init__(self, **kwargs)
self.merge_mode = kwargs.get("merge_mode")
self.name = kwargs.get("name", "")
self.nodes = kwargs.get("nodes", [])
self.source = kwargs.get("source", "source")
self.no_delete = kwargs.get("no_delete", False)
self.execute_filters = kwargs.get(
"execute_filters",
self.default_execution_filters,
)
self.last_result = {}
def add_module(self, tool, enabled=1, uuid: str = "") -> ModuleNode:
new_module = ModuleNode(parent=self, tool=tool, enabled=enabled, uuid=uuid)
self.nodes.append(new_module)
return new_module
def add_group(
self,
merge_mode: str,
name: str = "",
source="",
no_delete: bool = False,
uuid: str = "",
):
# Set source
if not source:
if len(self.nodes) > 0 and isinstance(self.nodes[-1], GroupNode):
source = self.nodes[-1].uuid
elif len(self.nodes) == 0:
source = "source"
else:
source = "last_output"
# Set unique name
root = self.root
group_names = [group.name for group in root.iter_items(types=("groups",))]
if not name or name in group_names:
if not name:
name = "Group"
i = 1
while f"{name} {i}" in group_names:
i += 1
name = f"{name} {i}"
# Create group
new_node = GroupNode(
parent=self, merge_mode=merge_mode, name=name, source=source, uuid=uuid
)
self.nodes.append(new_node)
return new_node
def remove_node(self, node: Union[int, object]):
if isinstance(node, int):
node = self.nodes[node]
if not isinstance(node, GroupNode) or not node.no_delete:
self.root.invalidate(node)
self.nodes.remove(node)
    def insert_node(self, index, node):
        if isinstance(node, (GroupNode, ModuleNode)):
            # Clamp the insertion index into the valid range [0, len(self.nodes)]
            self.nodes.insert(max(0, min(index, len(self.nodes))), node)
def get_source_image(self, source: str, call_back):
wrapper = self.root.parent.wrapper
if source == "source":
return wrapper.source_image
elif source == "last_output":
nodes = self.root.as_pivot_list(index=self)
for node in reversed(nodes["before"]):
if (
node.enabled
and node.output_type == ipc.IO_IMAGE
and node.last_result.get("image", None) is not None
):
return node.last_result["image"]
break
else:
return wrapper.current_image
else:
node = self.root.find_by_uuid(source)
if (
node is None
or node.last_result.get("image", None) is None
or node.enabled == 0
):
self.last_result = {}
self.do_call_back(
call_back=call_back,
res=logging.WARNING,
msg=f"{self.name} - Failed to retrieve source {source}, selecting last output instead",
data=None,
is_progress=False,
)
return self.get_source_image(source="last_output", call_back=call_back)
else:
return node.last_result.get("image")
def execute(self, **kwargs):
before = timer()
call_back = kwargs.get("call_back", None)
target_module = kwargs.get("target_module", "")
wrapper = self.root.parent.wrapper
wrapper.current_image = self.get_source_image(
source=self.source, call_back=call_back
)
rois = []
only_rois = False
for node in self.nodes:
if not node.enabled:
continue
if node.output_type != ipc.IO_ROI:
break
else:
only_rois = True
is_current_image_changed = False
def add_roi(wrapper: BaseImageProcessor, roi_list: list, roi, node):
if roi is None:
logger.warning(f"Missing ROI for {node.name}")
else:
if isinstance(roi, list):
roi_list.extend(roi)
wrapper.add_rois(roi_list=roi)
else:
roi_list.append(roi)
wrapper.add_roi(new_roi=roi)
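        # Merge modes: NONE runs children only for their side effects (data,
        # ROIs), CHAIN threads the image/mask through children in order,
        # AND/OR combine all child outputs into a single image or mask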
if self.merge_mode == ipc.MERGE_MODE_NONE:
for node in self.nodes:
if not node.enabled:
continue
if node.is_group and not node.matches_filters:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Group {node.name} did not match filters, skipped",
data=None,
is_progress=False,
)
continue
res = node.execute(**kwargs)
if self.stop_processing:
return res
if res:
if node.output_type == ipc.IO_DATA:
wrapper.csv_data_holder.data_list.update(res["data"])
elif node.output_type == ipc.IO_ROI:
add_roi(
wrapper=wrapper,
roi_list=rois,
roi=res.get("roi", None),
node=node,
)
else:
self.last_result["outcome"] = False
if node.uuid == target_module:
self.stop_processing = True
return self.last_result
elif self.merge_mode == ipc.MERGE_MODE_CHAIN:
for node in self.nodes:
if not node.enabled:
continue
if node.is_group and not node.matches_filters:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Group {node.name} did not match filters, skipped",
data=None,
is_progress=False,
)
continue
res = node.execute(**kwargs)
if self.stop_processing:
return res
if res:
if node.output_type == ipc.IO_IMAGE:
wrapper.current_image = res["image"]
is_current_image_changed = True
elif node.output_type == ipc.IO_MASK:
wrapper.mask = res["mask"]
elif node.output_type == ipc.IO_DATA:
wrapper.csv_data_holder.data_list.update(res["data"])
elif node.output_type == ipc.IO_ROI:
add_roi(
wrapper=wrapper,
roi_list=rois,
roi=res.get("roi", None),
node=node,
)
else:
self.last_result["outcome"] = False
if node.uuid == target_module:
self.stop_processing = True
return node.last_result
elif self.merge_mode in [ipc.MERGE_MODE_AND, ipc.MERGE_MODE_OR]:
images = []
for node in self.nodes:
if not node.enabled:
continue
if node.is_group and not node.matches_filters:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Group {node.name} did not match filters, skipped",
data=None,
is_progress=False,
)
continue
res = node.execute(**kwargs)
if self.stop_processing:
return res
if res:
if node.output_type == ipc.IO_IMAGE:
images.append(res["image"])
elif node.output_type == ipc.IO_MASK:
images.append(res["mask"])
elif node.output_type == ipc.IO_ROI:
add_roi(
wrapper=wrapper,
roi_list=rois,
roi=res.get("roi", None),
node=node,
)
else:
self.last_result["outcome"] = False
if node.uuid == target_module:
self.stop_processing = True
return node.last_result
if self.merge_mode == ipc.MERGE_MODE_AND:
res = wrapper.multi_and(images)
else:
res = wrapper.multi_or(images)
if self.output_type == ipc.IO_IMAGE:
wrapper.current_image = res
is_current_image_changed = True
elif self.output_type == ipc.IO_MASK:
wrapper.mask = res
else:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f'Invalid output type "{self.output_type}" for merge mode "{self.merge_mode}" in {self.name}',
data=None,
is_progress=False,
)
self.last_result["outcome"] = False
else:
pass
if only_rois and rois:
self.last_result["roi"] = rois
self.last_result["image"] = wrapper.draw_rois(
img=wrapper.current_image, rois=rois
)
elif is_current_image_changed or (len(wrapper.image_list) == 0):
self.last_result["image"] = wrapper.current_image
else:
self.last_result["image"] = wrapper.image_list[-1]["image"]
self.last_result["mask"] = wrapper.mask
self.last_result["data"] = wrapper.csv_data_holder.data_list
if self.is_root:
if self.parent.settings.mosaic.enabled:
self.root.parent.mosaic = wrapper.build_mosaic(
image_names=self.parent.settings.mosaic.images,
images_dict=self.parent.stored_mosaic_images,
)
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Pipeline processed in {format_time(timer() - before)}",
data={
"name": f"{wrapper.luid}_final_mosaic",
"image": self.root.parent.mosaic,
"data": self.last_result["data"],
"plant_name": "unknown" if wrapper is None else wrapper.plant,
"luid": wrapper.luid,
},
force_call_back=True,
is_progress=False,
)
else:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Processed {wrapper.luid} in {format_time(timer() - before)}",
data=self
if self.root.parent.show_group_result or self.root.parent.silent
else None,
force_call_back=True,
is_progress=False,
)
elif not target_module:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Successfully processed {self.name}, merge mode: {self.merge_mode} in {format_time(timer() - before)}",
data=self if self.root.parent.show_group_result else None,
is_progress=False,
)
return self.last_result
def copy(self, parent):
return GroupNode(
parent=parent,
merge_mode=self.merge_mode,
name=self.name,
source=self.source,
nodes=[node.copy(parent=self) for node in self.nodes],
execute_filters=self.execute_filters,
)
def to_code(self, indent: int):
pass
def get_parent(self, item):
for node in self.nodes:
if hasattr(node, "uuid"):
if item.uuid == node.uuid:
return self
elif isinstance(node, GroupNode):
parent = node.get_parent(item)
if parent is not None:
return parent
return None
def to_json(self):
return dict(
node_type="group",
merge_mode=self.merge_mode,
name=self.name,
uuid=self.uuid,
source=self.source,
no_delete=self.no_delete,
nodes=[node.to_json() for node in self.nodes],
execute_filters=self.execute_filters,
)
@classmethod
def from_json(cls, parent, json_data: dict):
res = cls(
parent=parent,
merge_mode=json_data["merge_mode"],
name=json_data["name"],
uuid=json_data["uuid"],
no_delete=json_data["no_delete"],
source=json_data["source"],
execute_filters=json_data.get(
"execute_filters", cls.default_execution_filters
),
)
for node in json_data["nodes"]:
if node["node_type"] == "module":
res.nodes.append(ModuleNode.from_json(parent=res, json_data=node))
elif node["node_type"] == "group":
res.nodes.append(GroupNode.from_json(parent=res, json_data=node))
else:
eh.log_data(
log_msg=f"Unknown node type: {node['node_type']}",
log_level=logging.ERROR,
target_logger=logger,
)
return res
def modules(self):
return [node for node in self.nodes if isinstance(node, ModuleNode)]
def groups(self):
return [node for node in self.nodes if isinstance(node, GroupNode)]
def module(self, index) -> ModuleNode:
lst = self.modules()
if len(lst) > index:
return lst[index]
else:
return None
def group(self, index) -> Node:
lst = self.groups()
if len(lst) > index:
return lst[index]
else:
return None
def iter_items(self, types: tuple = ("groups", "modules")):
def parse_children_(parent):
for node in parent.nodes:
if (("groups" in types) and isinstance(node, GroupNode)) or (
("modules" in types) and isinstance(node, ModuleNode)
):
yield node
if isinstance(node, GroupNode):
yield from parse_children_(node)
if (("groups" in types) and isinstance(self, GroupNode)) or (
("modules" in types) and isinstance(self, ModuleNode)
):
yield self
yield from parse_children_(self)
def as_pivot_list(self, index, types: tuple = ("groups", "modules")) -> dict:
"""Splits all nodes in three classes
* before: all nodes before index
* pivot: index
* after: all nodes after index
"""
nodes = [node for node in self.iter_items(types)]
if index not in nodes:
return {}
res = {"before": [], "pivot": index, "after": []}
matched_uuid = False
for node in nodes:
if node.uuid == index.uuid:
matched_uuid = True
continue
if matched_uuid:
res["after"].append(node)
else:
res["before"].append(node)
return res
def find_by_uuid(self, uuid):
if self.uuid == uuid:
return self
for node in self.iter_items():
if node.uuid == uuid:
return node
else:
return None
def find_by_name(self, name):
"""Returns the node that matches exactly the name
There's no warranty that names are unique"""
for node in self.iter_items():
if node.name == name:
return node
else:
return None
    def check_input(self, node) -> bool:
        if isinstance(node, GroupNode):
            if self.merge_mode in [ipc.MERGE_MODE_AND, ipc.MERGE_MODE_OR]:
                has_image, has_mask = False, False
                for child in self.nodes:
                    if child.output_type in [ipc.IO_DATA, ipc.IO_NONE, ipc.IO_ROI]:
                        return False
                    elif child.output_type == ipc.IO_IMAGE:
                        has_image = True
                    elif child.output_type == ipc.IO_MASK:
                        has_mask = True
                    else:
                        return False
                if has_image and has_mask:
                    return False
        if isinstance(node, GroupNode) and node.module_count > 0:
            n = node.module(0)
        else:
            n = node
        if isinstance(n, GroupNode):
            return True
        pivot_list = self.as_pivot_list(index=n, types=("modules",))
        if not pivot_list:
            return False
        if len(pivot_list["before"]) > 0:
            if n.input_type == ipc.IO_DATA:
                needed_output = ipc.IO_DATA
            elif n.input_type == ipc.IO_IMAGE:
                return True
            elif n.input_type == ipc.IO_MASK:
                needed_output = ipc.IO_MASK
            elif n.input_type == ipc.IO_NONE:
                return True
            elif n.input_type == ipc.IO_ROI:
                needed_output = ipc.IO_ROI
            else:
                # Unknown input type: nothing upstream can satisfy it
                return False
            for previous in pivot_list["before"]:
                if previous.output_type == needed_output:
                    return True
            return False
        else:
            return n.input_type == ipc.IO_IMAGE
def invalidate(self, node):
pivot_list = self.as_pivot_list(index=node)
node.last_result = {}
for node in pivot_list["after"]:
if isinstance(node, ModuleNode):
node.invalidate()
elif isinstance(node, GroupNode):
node.last_result = {}
@property
def input_type(self):
if len(self.nodes) == 0:
return ipc.IO_NONE
else:
return self.nodes[0].input_type
@property
def output_type(self):
if len(self.nodes) == 0:
return ipc.IO_NONE
else:
return self.nodes[-1].output_type
@property
def node_count(self):
return len(self.nodes)
@property
def group_count(self):
return len(self.groups())
@property
def module_count(self):
return len(self.modules())
@property
def enabled(self):
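        # Tri-state: 0 = everything disabled, 2 = everything enabled,
        # 1 = mixed (or a partially enabled sub-group)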
if self.node_count == 0:
return 0
else:
has_enabled = False
has_disabled = False
for node in self.nodes:
if isinstance(node, GroupNode):
enabled_state = node.enabled
if enabled_state == 0:
has_disabled = True
elif enabled_state == 1:
return 1
elif enabled_state == 2:
has_enabled = True
elif isinstance(node, ModuleNode):
if node.enabled:
has_enabled = True
else:
has_disabled = True
if has_enabled and has_disabled:
return 1
return 2 if has_enabled else 0
@enabled.setter
def enabled(self, value):
for node in self.nodes:
node.enabled = value
@property
def matches_filters(self):
wrapper = self.root.parent.wrapper
for k, v in self.execute_filters.items():
current_filter = [filter_ for filter_ in IptParam.decode_string(v) if filter_]
if not current_filter:
continue
if not hasattr(wrapper, k) or getattr(wrapper, k) not in current_filter:
return False
return True
class LoosePipeline(object):
def __init__(self, **kwargs):
self.root: GroupNode = GroupNode(
merge_mode=ipc.MERGE_MODE_CHAIN, name="Pipeline", parent=self
)
self.target_data_base = None
self.settings: PipelineSettings = PipelineSettings(pipeline=self)
self.last_wrapper_luid = ""
self.use_cache = True
self.image_output_path = ""
self.name = ""
self.description = "Please insert description"
self.stored_mosaic_images = {}
self._stop_processing = False
self.set_template(kwargs.get("template", None))
self.silent = False
self.mosaic = None
self.wrapper: BaseImageProcessor = None
self.error_level = logging.INFO
self.set_callbacks()
def __repr__(self):
return json.dumps(self.to_json(), indent=2, sort_keys=False)
def __str__(self):
return f"Pipeline {self.name}"
def set_template(self, template):
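        # Known templates: "default" and "legacy"; any other value (including
        # None) leaves the pipeline without predefined groups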
if isinstance(template, str):
if template == "default":
self.root.add_group(
name="Fix image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="source",
uuid="fix_image",
)
self.root.add_group(
name="Pre process image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(0).uuid,
uuid="pre_process_image",
)
self.root.add_group(
name="Build mask",
merge_mode=ipc.MERGE_MODE_AND,
source=self.root.group(1).uuid,
uuid="build_mask",
)
self.root.add_group(
name="Clean mask",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(1).uuid,
uuid="clean_mask",
)
self.root.add_group(
name="Extract features",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(0).uuid,
uuid="extract_features",
)
elif template == "legacy":
self.root.add_group(
name="ROIs from raw image",
merge_mode=ipc.MERGE_MODE_NONE,
source="source",
uuid="roi_raw",
)
self.root.add_group(
name="Fix image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="source",
uuid="fix_image",
)
self.root.add_group(
name="Pre process image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="fix_image",
uuid="pre_process_image",
)
self.root.add_group(
name="ROIs from raw pre processed image",
merge_mode=ipc.MERGE_MODE_NONE,
source="pre_process_image",
uuid="roi_pre_processed",
)
self.root.add_group(
name="Build mask",
merge_mode=ipc.MERGE_MODE_AND,
source="pre_process_image",
uuid="build_mask",
)
self.root.add_group(
name="Apply ROIS",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(1).uuid,
uuid="apply_roi",
)
self.root.add_group(
name="Clean mask",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="pre_process_image",
uuid="clean_mask",
)
self.root.add_group(
name="Assert mask position",
merge_mode=ipc.MERGE_MODE_NONE,
source=self.root.group(1).uuid,
uuid="assert_mask_position",
)
self.root.add_group(
name="Extract features",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="fix_image",
uuid="extract_features",
)
self.root.add_group(
name="Build images",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="fix_image",
uuid="build_images",
)
def add_module(self, operator, target_group: str = "") -> bool:
if not target_group:
target_group = self.root
else:
target_group = self.root.find_by_uuid(target_group)
if target_group is None or operator is None:
return False
target_group.add_module(tool=operator)
return True
def execute(
self,
src_image: Union[str, BaseImageProcessor],
silent_mode: bool = False,
additional_data: dict = {},
write_data: bool = False,
target_data_base=None,
overwrite_data: bool = False,
store_images: bool = True,
options=None,
**kwargs,
):
# Override settings
self.error_level = logging.INFO
self.stop_processing = False
self.silent = silent_mode
self.text_result = ""
# Build/retrieve wrapper
if isinstance(src_image, str):
self.wrapper = BaseImageProcessor(
src_image,
options=options,
database=target_data_base,
)
elif isinstance(src_image, BaseImageProcessor):
self.wrapper = src_image
else:
logger.error(f"Unknown source {str(src_image)}")
self.error_level = logging.ERROR
self.text_result = "Source error"
return False
# Check wrapper
if self.wrapper is None:
if isinstance(src_image, str):
logger.error(f"Unable to build wrapper from file '{src_image}''")
else:
logger.error("Unable to retrieve wrapper.")
self.error_level = logging.ERROR
self.text_result = "Wrapper error"
return False
elif os.path.isfile(self.wrapper.csv_file_path) and overwrite_data is False:
self.error_level = logging.INFO
self.text_result = "Skipped"
return True
elif not self.wrapper.check_source_image():
if isinstance(src_image, str):
logger.error(f"Image seems to be corrupted '{src_image}''")
else:
logger.error("Image seems to be corrupted.")
self.text_result = "Corrupted image"
self.error_level = logging.ERROR
return False
# Override wrapper settings
if self.last_wrapper_luid != self.wrapper.luid:
self.invalidate()
self.last_wrapper_luid = self.wrapper.luid
self.wrapper.lock = True
self.wrapper.target_database = target_data_base
self.wrapper.store_images = store_images and (
self.root.parent.debug_mode or bool(kwargs.get("target_module", ""))
)
for module in self.root.iter_items(types=("modules",)):
self.wrapper.forced_storage_images_list.extend(module.tool.required_images)
if self.image_output_path:
pass
elif options is None:
self.image_output_path = ipso_folders.get_path(
key="image_output",
force_creation=False,
)
else:
self.image_output_path = self.wrapper.dst_path
# Prepare data holder
self.wrapper.init_data_holder()
# Execute pipeline
self.root.execute(**kwargs)
# Update data with forced key value pairs
for k, v in additional_data.items():
self.wrapper.csv_data_holder.update_csv_value(key=k, value=v, force_pair=True)
if write_data is True:
try:
with open(self.wrapper.csv_file_path, "w", newline="") as csv_file_:
wr = csv.writer(csv_file_, quoting=csv.QUOTE_NONE)
wr.writerow(self.wrapper.csv_data_holder.header_to_list())
wr.writerow(self.wrapper.csv_data_holder.data_to_list())
except Exception as e:
logger.exception(f"Failed to write image data because {repr(e)}")
index = kwargs.get("index", -1)
total = kwargs.get("total", -1)
if index >= 0 and total >= 0:
eh.log_data(
log_msg=(
f'{"OK" if self.error_level < self.stop_on else "FAIL"} - '
+ f"{(index + 1):{len(str(total))}d}/{total} >>> "
+ self.wrapper.name
),
log_level=self.error_level,
target_logger=logger,
)
index = kwargs.get("index", -1)
total = kwargs.get("total", -1)
if index >= 0 and total >= 0:
eh.log_data(
log_msg=(
f'{"OK" if self.error_level < self.stop_on else "FAIL"} - '
+ f"{(index + 1):{len(str(total))}d}/{total} >>> "
+ self.wrapper.name
),
log_level=self.error_level,
target_logger=logger,
)
return self.error_level < self.stop_on
def targeted_callback(self, param: IptParam):
if param.name == "debug_mode":
if self.root.nodes:
self.root.invalidate(self.root)
else:
print(f"{param.name} was set")
def set_callbacks(self):
p = self.settings.find_by_name(name="debug_mode")
if p is not None:
p.on_change = self.targeted_callback
def invalidate(self):
self.stored_mosaic_images = {}
for node in self.root.iter_items():
if isinstance(node, ModuleNode):
node.invalidate()
elif isinstance(node, GroupNode):
node.last_result = {}
def save(self, file_name: str) -> bool:
try:
with open(file_name, "w") as f:
json.dump(self.to_json(), f, indent=2)
except Exception as e:
logger.exception(
f'Failed to save pipeline "{repr(e)}"',
)
return False
else:
return True
@classmethod
def load(cls, file_name: str):
with open(file_name, "r") as f:
return cls.from_json(json_data=json.load(f))
def copy(self):
return self.__class__.from_json(self.to_json())
def get_parent(self, item: Union[GroupNode, ModuleNode]) -> GroupNode:
return self.root.get_parent(item=item)
def remove_item(self, item: Union[GroupNode, ModuleNode]):
self.root.remove_node(item)
def to_code(self):
pass
def to_json(self):
save_dict = {
"title": "IPSO Phen pipeline V2",
"name": self.name,
"description": self.description,
"date": dt.now().strftime("%Y_%b_%d_%H-%M-%S"),
"version": last_script_version,
}
# Add settings
save_dict["settings"] = self.settings.params_to_dict()
# Add root node
save_dict["Pipeline"] = self.root.to_json()
return save_dict
@classmethod
def from_json(cls, json_data: dict):
if json_data["title"].lower() == "ipso phen pipeline v2":
res = cls()
res.name = json_data["name"]
res.description = json_data["description"]
res.settings = PipelineSettings(pipeline=res, **json_data["settings"])
res.root = GroupNode.from_json(parent=res, json_data=json_data["Pipeline"])
elif json_data["title"].lower() == "ipso phen pipeline":
res = cls(template="default_groups")
tmp = IptStrictPipeline.from_json(json_data=json_data)
# Import basic data
res.name = tmp.name
res.description = "Pipeline imported from old format, please check data"
# Import settings
for setting in res.settings.gizmos:
p = tmp.settings.find_by_name(setting.name)
if p is not None:
setting.value = p.value
# create groups
res.set_template(template="legacy")
# Import nodes & modules
for uuid, kinds in zip(
[
"roi_raw",
"fix_image",
"pre_process_image",
"roi_pre_processed",
"build_mask",
],
[
ipc.ToolFamily.ROI_RAW_IMAGE_STR,
[ipc.ToolFamily.WHITE_BALANCE, ipc.ToolFamily.EXPOSURE_FIXING],
ipc.ToolFamily.PRE_PROCESSING,
ipc.ToolFamily.ROI_PP_IMAGE_STR,
ipc.ToolFamily.THRESHOLD,
],
):
src_group = tmp.get_operators(constraints={"kind": kinds})
dst_group = res.root.find_by_uuid(uuid=uuid)
for tool_dict in src_group:
dst_group.add_module(
tool=tool_dict["tool"].copy(),
enabled=tool_dict["enabled"],
uuid=tool_dict["uuid"],
)
res.root.find_by_uuid(uuid="build_mask").merge_mode = (
ipc.MERGE_MODE_AND
if tmp.merge_method == "multi_and"
else ipc.MERGE_MODE_OR
)
rois = tmp.get_operators(
constraints={
"kind": [
ipc.ToolFamily.ROI_PP_IMAGE_STR,
ipc.ToolFamily.ROI_RAW_IMAGE_STR,
]
}
)
dst_group = res.root.find_by_uuid(uuid="apply_roi")
for tool_dict in rois:
ipt = tool_dict["tool"]
roi_type = ipt.get_value_of("roi_type")
if roi_type not in [
"keep",
"delete",
"erode",
"dilate",
"open",
"close",
]:
continue
dst_group.add_module(
tool=get_ipt_class(class_name="IptApplyRoi")(
roi_names=ipt.get_value_of("roi_name"),
roi_selection_mode="all_named",
roi_type=roi_type,
input_source="mask",
output_mode="mask",
)
)
dst_group = res.root.find_by_uuid(uuid="assert_mask_position")
for tool_dict in rois:
ipt = tool_dict["tool"]
if ipt.get_value_of("roi_type") not in ["enforce"]:
continue
dst_group.add_module(
tool=get_ipt_class(class_name="IptAssertMaskPosition")(
roi_names=ipt.get_value_of("roi_name"),
roi_selection_mode="all_named",
)
)
for uuid, kinds in zip(
["clean_mask", "extract_features", "build_images"],
[
ipc.ToolFamily.MASK_CLEANUP,
ipc.ToolFamily.FEATURE_EXTRACTION,
ipc.ToolFamily.IMAGE_GENERATOR,
],
):
src_group = tmp.get_operators(constraints={"kind": kinds})
dst_group = res.root.find_by_uuid(uuid=uuid)
for tool_dict in src_group:
dst_group.add_module(
tool=tool_dict["tool"].copy(),
enabled=tool_dict["enabled"],
uuid=tool_dict["uuid"],
)
else:
res = cls()
res.name = (
f'Failed to load unknown pipeline type "{json_data["title"].lower()}"'
)
if res.stop_on < 10:
res.stop_on = 35
res.set_callbacks()
return res
def update_error_level(self, error_level):
self.error_level = max(self.error_level, error_level)
@property
def node_count(self):
return len(self.root.nodes)
@property
def threshold_only(self):
return self.settings.get_value_of("threshold_only") == 1
@threshold_only.setter
def threshold_only(self, value):
self.settings.set_value_of(
key="threshold_only", value=1 if value is True else 0, update_widgets=False
)
@property
def debug_mode(self):
return self.settings.get_value_of("debug_mode") == 1
@debug_mode.setter
def debug_mode(self, value):
self.settings.set_value_of(
key="debug_mode", value=1 if value is True else 0, update_widgets=False
)
@property
def show_tool_result(self):
return self.settings.get_value_of("show_tool_result") == 1
@show_tool_result.setter
def show_tool_result(self, value):
self.settings.set_value_of(
key="show_tool_result", value=1 if value is True else 0, update_widgets=False
)
@property
def show_group_result(self):
return self.settings.get_value_of("show_group_result") == 1
@show_group_result.setter
def show_group_result(self, value):
self.settings.set_value_of(
key="show_group_result", value=1 if value is True else 0, update_widgets=False
)
@property
def tool_group_name_watermark(self):
return self.settings.get_value_of("tool_group_name_watermark") == 1
@tool_group_name_watermark.setter
def tool_group_name_watermark(self, value):
self.settings.set_value_of(
key="tool_group_name_watermark",
value=1 if value is True else 0,
update_widgets=False,
)
@property
def stop_on(self) -> int:
return self.settings.get_value_of("stop_on")
@stop_on.setter
def stop_on(self, value: int):
self.settings.set_value_of("stop_on", value)
@property
def last_image(self):
return self.settings.get_value_of("last_image")
@last_image.setter
def last_image(self, value):
self.settings.set_value_of("last_image", value)
@property
def allow_step_mosaics(self):
return self.settings.get_value_of("allow_step_mosaics")
@allow_step_mosaics.setter
def allow_step_mosaics(self, value):
self.settings.set_value_of("allow_step_mosaics", value)
@property
def show_source_image(self):
return self.settings.get_value_of("show_source_image")
@show_source_image.setter
def show_source_image(self, value):
self.settings.set_value_of("show_source_image", value)
@property
def stop_processing(self):
return self._stop_processing or self.error_level >= self.stop_on
@stop_processing.setter
def stop_processing(self, value):
self._stop_processing = value
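# Minimal usage sketch (file paths are hypothetical):
#   pipeline = LoosePipeline.load("pipelines/sample.json")
#   ok = pipeline.execute(src_image="images/plant_001.jpg", write_data=True)
#   if not ok:
#       print(pipeline.text_result)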
avg_line_length: 36.844614 | max_line_length: 125 | alphanum_fraction: 0.495232
from ipapi.tools.folders import ipso_folders
from uuid import uuid4
import json
from datetime import datetime as dt
from timeit import default_timer as timer
import itertools
from typing import Union
import logging
import csv
import os
import numpy as np
from ipapi.base.ipt_abstract import IptParam, IptBase, IptParamHolder
from ipapi.base.ipt_functional import get_ipt_class
from ipapi.base import ip_common as ipc
from ipapi.base.ipt_strict_pipeline import IptStrictPipeline
from ipapi.base.ip_abstract import BaseImageProcessor
import ipapi.tools.error_holder as eh
from ipapi.tools.common_functions import format_time
from ipapi.tools.regions import RectangleRegion
logger = logging.getLogger(__name__)
last_script_version = "0.2.0.0"
class MosaicData(object):
def __init__(self, pipeline, enabled, images):
super().__init__()
self.enabled = enabled
self.images = images
if isinstance(self.images, str):
self.images = [
[i for i in line.split(",")] for line in self.images.split("\n")
]
self.pipeline = pipeline
class PipelineSettings(IptParamHolder):
def __init__(self, pipeline, **kwargs):
self.update_feedback_items = []
super(PipelineSettings, self).__init__(**kwargs)
self.mosaic = MosaicData(
pipeline=pipeline,
enabled=kwargs.get("mosaic_enabled", kwargs.get("build_mosaic", True) == 1),
images=kwargs.get(
"mosaic_images", kwargs.get("mosaic_items", [["source", "mask"]])
),
)
def build_params(self):
self.add_checkbox(
name="show_tool_result",
desc="Show a result image for each tool",
default_value=1,
)
self.add_checkbox(
name="show_group_result",
desc="Show a result image for each group",
default_value=0,
)
self.add_checkbox(
name="debug_mode",
desc="Display debug images",
default_value=0,
hint="Display module's intermediary images",
)
self.add_checkbox(
name="allow_step_mosaics",
desc="Allow mosaics for steps",
default_value=1,
hint="If checked, some steps will return mosaics instead of single images",
)
self.add_checkbox(
name="show_source_image",
desc="Show source image/mask for each tool",
default_value=0,
)
self.add_checkbox(
name="tool_group_name_watermark",
desc="Add a watermark with the name of the generating source to each output image",
default_value=0,
)
self.add_combobox(
name="stop_on",
desc="Stop processing on error level",
default_value=eh.ERR_LVL_EXCEPTION,
values={i: eh.error_level_to_str(i) for i in [0, 10, 20, 30, 35, 40, 50]},
hint="If any error of the selected level or higher happens the process will halt",
)
def params_to_dict(
self,
include_input: bool = True,
include_output: bool = False,
include_neutral: bool = False,
):
dic = {}
for p in self.gizmos:
if (
(include_input and p.is_input)
or (include_output and p.is_output)
or (include_neutral and p.is_neutral)
):
dic[p.name] = p.value
dic["mosaic_enabled"] = self.mosaic.enabled
dic["mosaic_images"] = self.mosaic.images
return dic
def items(self):
return self.gizmos + [self.mosaic]
@property
def node_count(self):
return len(self.items())
class Node(object):
def __init__(self, **kwargs):
self.uuid = kwargs.get("uuid", str(uuid4()))
if not self.uuid:
self.uuid = str(uuid4())
self.parent = kwargs.get("parent")
self.last_result = {}
def get_relevant_image(self, exclude_demo: bool = False):
if not exclude_demo:
demo_image = self.last_result.get("demo_image", None)
if demo_image is not None:
ri = demo_image
if self.output_type == ipc.IO_IMAGE:
ri = self.last_result.get(
"image", np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
)
elif self.output_type == ipc.IO_MASK:
ri = self.last_result.get(
"mask", np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
)
elif self.output_type in [ipc.IO_DATA, ipc.IO_ROI, ipc.IO_NONE]:
ri = self.last_result.get(
"image",
self.last_result.get(
"mask", np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
),
)
else:
ri = np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
if self.root.parent.tool_group_name_watermark:
ri = ri.copy()
BaseImageProcessor.draw_text(
img=ri,
text=self.name,
fnt_color=ipc.C_WHITE,
background_color=ipc.C_BLACK,
)
return ri
def get_feedback_image(self, data: dict):
demo_image = data.get("demo_image", None)
if demo_image is not None:
fi = demo_image
else:
mask = data.get("mask", None)
image = data.get("image", None)
if (
mask is not None
and image is not None
and self.root.parent.allow_step_mosaics
):
h = max(mask.shape[0], image.shape[0])
w = max(mask.shape[1], image.shape[1])
canvas = ipc.enclose_image(
a_cnv=np.full(
shape=(h + 4, w * 2 + 6, 3),
fill_value=ipc.C_SILVER,
dtype=np.uint8,
),
img=image,
rect=RectangleRegion(left=2, top=2, width=w, height=h),
)
fi = ipc.enclose_image(
a_cnv=canvas,
img=np.dstack((mask, mask, mask)),
rect=RectangleRegion(left=w + 4, top=2, width=w, height=h),
)
elif mask is not None:
fi = mask
elif image is not None:
fi = image
else:
fi = np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
if self.root.parent.tool_group_name_watermark:
fi = fi.copy()
BaseImageProcessor.draw_text(
img=fi,
text=self.name,
fnt_color=ipc.C_WHITE,
background_color=ipc.C_BLACK,
)
return fi
def do_call_back(
self,
call_back,
res,
msg,
data,
is_progress=True,
force_call_back=False,
**kwargs,
):
if call_back is not None:
call_back(
eh.error_level_to_str(res),
msg,
data
if call_back is not None
and (force_call_back or not self.root.parent.silent)
else None,
self.absolute_index + 1 if is_progress else -1,
self.absolute_count if is_progress else -1,
)
else:
if isinstance(res, int) and (res >= logging.WARNING):
eh.log_data(log_msg=msg, log_level=res, target_logger=logger)
md = np.array(self.root.parent.settings.mosaic.images)
wrapper = self.root.parent.wrapper
needed_images = wrapper.forced_storage_images_list
if isinstance(data, (GroupNode, ModuleNode)):
dn = data.name
if dn in md:
self.root.parent.stored_mosaic_images[dn] = self.get_relevant_image()
if dn in needed_images:
wrapper.store_image(
image=self.get_relevant_image(exclude_demo=True),
text=dn,
force_store=True,
)
elif isinstance(data, BaseImageProcessor):
for d in data.image_list:
if d["name"] in md:
self.root.parent.stored_mosaic_images[d["name"]] = d["image"]
self.root.parent.update_error_level(res)
@property
def root(self):
root = self
while root.parent is not None and not isinstance(root.parent, LoosePipeline):
root = root.parent
return root
@property
def absolute_index(self):
if isinstance(self, GroupNode):
if isinstance(self.parent, LoosePipeline):
return self.absolute_count
lst = self.root.as_pivot_list(index=self, types=("groups"))
elif isinstance(self, ModuleNode):
lst = self.root.as_pivot_list(index=self, types=("modules"))
else:
return -2
return len(lst.get("before", ()))
@property
def absolute_count(self):
if isinstance(self, GroupNode):
return len(list(self.root.iter_items(types=("groups"))))
elif isinstance(self, ModuleNode):
return len(list(self.root.iter_items(types=("modules"))))
else:
return -2
@property
def stop_processing(self):
return self.root.parent.stop_processing
@stop_processing.setter
def stop_processing(self, value):
self.root.parent.stop_processing = value
@property
def is_module(self):
return isinstance(self, ModuleNode)
@property
def is_group(self):
return isinstance(self, GroupNode)
@property
def is_root(self):
return isinstance(self.parent, LoosePipeline)
class ModuleNode(Node):
def __init__(self, **kwargs):
Node.__init__(self, **kwargs)
self.enabled = kwargs.get("enabled", 1)
self.tool = kwargs.get("tool")
self.tool.owner = self
def _execute_standard(self, tool, call_back=None, target_module: str = ""):
res = {}
wrapper = self.root.parent.wrapper
if self.root.parent.show_source_image:
if wrapper is not None and wrapper.current_image is not None:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg="",
data={
"plant_name": wrapper.plant,
"name": f"{self.name} (source image)",
"image": wrapper.current_image,
"luid": wrapper.luid,
"data": {},
},
)
if wrapper is not None and wrapper.mask is not None:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg="",
data={
"plant_name": wrapper.plant,
"name": f"{self.name} (source mask)",
"image": wrapper.mask,
"luid": wrapper.luid,
"data": {},
},
)
if tool.process_wrapper(wrapper=wrapper):
# Get ROI
if self.output_type == ipc.IO_ROI:
func = getattr(tool, "generate_roi", None)
if callable(func):
roi = func(wrapper=wrapper)
if roi is not None:
res["roi"] = roi
if not wrapper.store_images:
wrapper.store_image(
image=roi.draw_to(
dst_img=wrapper.current_image,
line_width=max(4, wrapper.width // 200),
),
text=self.name,
force_store=True,
)
else:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f"Failed to generate ROI from {self.name}",
data=wrapper if self.root.parent.debug_mode else self,
)
# Get data
if hasattr(tool, "data_dict"):
res["data"] = tool.data_dict
# Get mask
if self.output_type == ipc.IO_MASK:
res["mask"] = tool.result
if tool.result is None:
self.do_call_back(
call_back=call_back,
res=logging.WARNING,
msg=f"Failed to generate mask from {self.name}",
data=None,
)
# Get image
if (
self.output_type in [ipc.IO_MASK, ipc.IO_NONE, ipc.IO_ROI]
and tool.demo_image is not None
):
res["image"] = tool.demo_image
elif self.output_type == ipc.IO_ROI:
res["image"] = wrapper.draw_rois(
img=wrapper.current_image, rois=[res["roi"]]
)
elif self.output_type == ipc.IO_DATA:
if tool.demo_image is not None:
res["image"] = tool.demo_image
else:
res["image"] = wrapper.current_image
elif self.output_type == ipc.IO_IMAGE and isinstance(tool.result, np.ndarray):
res["image"] = tool.result
# Get demo image
if tool.demo_image is not None:
res["demo_image"] = tool.demo_image
return res
def _execute_grid_search(self, call_back):
def inner_call_back(res, msg, data, step, total):
if call_back is not None:
call_back(
res,
msg,
data,
step,
total,
)
param_settings_list = [p.decode_grid_search_options() for p in self.tool.gizmos]
size = 1
for ps in param_settings_list:
if len(ps) > 0:
size *= len(ps)
inner_call_back(
res="GRID_SEARCH_START",
msg="",
data=None,
step=0,
total=size,
)
procs = list(itertools.product(*param_settings_list))
keys = [p.name for p in self.tool.gizmos]
wrapper = self.root.parent.wrapper
for i, p in enumerate(procs):
res = self._execute_standard(
tool=self.tool.__class__(
**{k: (int(v) if str.isdigit(v) else v) for k, v in zip(keys, p)}
),
)
inner_call_back(
res="GRID_SEARCH_OK" if res else "GRID_SEARCH_NOK",
msg="Failed to process element",
data={
"plant_name": wrapper.plant,
"name": wrapper.short_name,
"image": self.get_feedback_image(res),
"data": res.get("data", {}),
"luid": wrapper.luid,
},
step=i + 1,
total=size,
)
inner_call_back(
res="GRID_SEARCH_END",
msg="",
data=None,
step=size,
total=size,
)
def execute(self, **kwargs):
call_back = kwargs.get("call_back", None)
target_module = kwargs.get("target_module", "")
grid_search_mode = kwargs.get("grid_search_mode", "")
wrapper = self.root.parent.wrapper
if not self.last_result:
if hasattr(self.tool, "output_path") and self.root.parent.image_output_path:
self.tool.output_path = self.root.parent.image_output_path
if target_module == self.uuid and grid_search_mode:
self._execute_grid_search(call_back=call_back)
self.last_result = {}
else:
before = timer()
self.last_result = self._execute_standard(
tool=self.tool,
call_back=call_back,
target_module=target_module,
)
if self.root.parent.debug_mode:
data = wrapper
elif self.root.parent.show_tool_result:
data = self
else:
data = None
if self.last_result:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Successfully processed {self.name} in {format_time(timer() - before)}",
data=data,
)
else:
if ipc.ToolFamily.ASSERT in self.tool.use_case:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f'Assertion "{self.tool.name}" failed for {self.name}',
data=wrapper
if self.root.parent.debug_mode or self.uuid == target_module
else self,
)
else:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f"Failed to process {self.name} in {format_time(timer() - before)}",
data=wrapper
if self.root.parent.debug_mode or self.uuid == target_module
else self,
)
return self.last_result
def invalidate(self):
self.last_result = {}
def copy(self, parent):
return ModuleNode(
parent=parent,
tool=self.tool,
enabled=self.enabled,
uuid=self.uuid,
)
def to_code(self, indent: int):
pass
def to_json(self):
return {
"node_type": "module",
"tool": self.tool.to_json(),
"enabled": self.enabled,
"uuid": self.uuid,
}
@classmethod
def from_json(cls, parent, json_data: dict):
if json_data["node_type"] != "module":
return None
tool = IptBase.from_json(json_data["tool"])
if isinstance(tool, Exception):
eh.log_data(
log_msg=f"Failed to load module: {repr(tool)}",
log_level=eh.ERR_LVL_EXCEPTION,
target_logger=logger,
)
elif isinstance(tool, IptBase):
return ModuleNode(
tool=tool,
parent=parent,
enabled=json_data["enabled"],
uuid=json_data["uuid"],
)
def sugar_name(self):
if self.tool.has_param("roi_name") and self.tool.get_value_of("roi_name"):
return f'{self.tool.name} {self.tool.get_value_of("roi_name")}'
elif self.tool.has_param("channel"):
return f'{self.tool.name} {self.tool.get_value_of("channel")}'
elif self.tool.name == "Morphology":
return f'{self.tool.name} {self.tool.get_value_of("morph_op")}'
elif self.tool.has_param("roi_names") and self.tool.get_value_of("roi_names"):
return f'{self.tool.name} {self.tool.get_value_of("roi_names")}'
else:
return self.tool.name
@property
def input_type(self):
if isinstance(self.tool, IptBase):
return self.tool.input_type
else:
return ipc.IO_NONE
@property
def output_type(self):
if isinstance(self.tool, IptBase):
return self.tool.output_type
else:
return ipc.IO_NONE
@property
def name(self):
sn = self.sugar_name()
nodes = [
node
for node in self.root.as_pivot_list(index=self, types=("modules",))["before"]
if node.sugar_name() == sn
]
return sn if len(nodes) == 0 else f"{sn} ({len(nodes)})"
class GroupNode(Node):
default_execution_filters = {
k: "" for k in ["experiment", "plant", "date", "time", "camera", "view_option"]
}
def __init__(self, **kwargs):
Node.__init__(self, **kwargs)
self.merge_mode = kwargs.get("merge_mode")
self.name = kwargs.get("name", "")
self.nodes = kwargs.get("nodes", [])
self.source = kwargs.get("source", "source")
self.no_delete = kwargs.get("no_delete", False)
self.execute_filters = kwargs.get(
"execute_filters",
self.default_execution_filters,
)
self.last_result = {}
def add_module(self, tool, enabled=1, uuid: str = "") -> ModuleNode:
new_module = ModuleNode(parent=self, tool=tool, enabled=enabled, uuid=uuid)
self.nodes.append(new_module)
return new_module
def add_group(
self,
merge_mode: str,
name: str = "",
source="",
no_delete: bool = False,
uuid: str = "",
):
# Set source
if not source:
if len(self.nodes) > 0 and isinstance(self.nodes[-1], GroupNode):
source = self.nodes[-1].uuid
elif len(self.nodes) == 0:
source = "source"
else:
source = "last_output"
# Set unique name
root = self.root
group_names = [group.name for group in root.iter_items(types=("groups",))]
if not name or name in group_names:
if not name:
name = "Group"
i = 1
while f"{name} {i}" in group_names:
i += 1
name = f"{name} {i}"
# Create group
new_node = GroupNode(
parent=self, merge_mode=merge_mode, name=name, source=source, uuid=uuid
)
self.nodes.append(new_node)
return new_node
def remove_node(self, node: Union[int, object]):
if isinstance(node, int):
node = self.nodes[node]
if not isinstance(node, GroupNode) or not node.no_delete:
self.root.invalidate(node)
self.nodes.remove(node)
def insert_node(self, index, node):
if isinstance(node, GroupNode) or isinstance(node, ModuleNode):
self.nodes.insert(min(0, max(index, len(self.nodes))), node)
def get_source_image(self, source: str, call_back):
wrapper = self.root.parent.wrapper
if source == "source":
return wrapper.source_image
elif source == "last_output":
nodes = self.root.as_pivot_list(index=self)
for node in reversed(nodes["before"]):
if (
node.enabled
and node.output_type == ipc.IO_IMAGE
and node.last_result.get("image", None) is not None
):
return node.last_result["image"]
break
else:
return wrapper.current_image
else:
node = self.root.find_by_uuid(source)
if (
node is None
or node.last_result.get("image", None) is None
or node.enabled == 0
):
self.last_result = {}
self.do_call_back(
call_back=call_back,
res=logging.WARNING,
msg=f"{self.name} - Failed to retrieve source {source}, selecting last output instead",
data=None,
is_progress=False,
)
return self.get_source_image(source="last_output", call_back=call_back)
else:
return node.last_result.get("image")
def execute(self, **kwargs):
before = timer()
call_back = kwargs.get("call_back", None)
target_module = kwargs.get("target_module", "")
wrapper = self.root.parent.wrapper
wrapper.current_image = self.get_source_image(
source=self.source, call_back=call_back
)
rois = []
only_rois = False
for node in self.nodes:
if not node.enabled:
continue
if node.output_type != ipc.IO_ROI:
break
else:
only_rois = True
is_current_image_changed = False
def add_roi(wrapper: BaseImageProcessor, roi_list: list, roi, node):
if roi is None:
logger.warning(f"Missing ROI for {node.name}")
else:
if isinstance(roi, list):
roi_list.extend(roi)
wrapper.add_rois(roi_list=roi)
else:
roi_list.append(roi)
wrapper.add_roi(new_roi=roi)
if self.merge_mode == ipc.MERGE_MODE_NONE:
for node in self.nodes:
if not node.enabled:
continue
if node.is_group and not node.matches_filters:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Group {node.name} did not match filters, skipped",
data=None,
is_progress=False,
)
continue
res = node.execute(**kwargs)
if self.stop_processing:
return res
if res:
if node.output_type == ipc.IO_DATA:
wrapper.csv_data_holder.data_list.update(res["data"])
elif node.output_type == ipc.IO_ROI:
add_roi(
wrapper=wrapper,
roi_list=rois,
roi=res.get("roi", None),
node=node,
)
else:
self.last_result["outcome"] = False
if node.uuid == target_module:
self.stop_processing = True
return self.last_result
elif self.merge_mode == ipc.MERGE_MODE_CHAIN:
for node in self.nodes:
if not node.enabled:
continue
if node.is_group and not node.matches_filters:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Group {node.name} did not match filters, skipped",
data=None,
is_progress=False,
)
continue
res = node.execute(**kwargs)
if self.stop_processing:
return res
if res:
if node.output_type == ipc.IO_IMAGE:
wrapper.current_image = res["image"]
is_current_image_changed = True
elif node.output_type == ipc.IO_MASK:
wrapper.mask = res["mask"]
elif node.output_type == ipc.IO_DATA:
wrapper.csv_data_holder.data_list.update(res["data"])
elif node.output_type == ipc.IO_ROI:
add_roi(
wrapper=wrapper,
roi_list=rois,
roi=res.get("roi", None),
node=node,
)
else:
self.last_result["outcome"] = False
if node.uuid == target_module:
self.stop_processing = True
return node.last_result
elif self.merge_mode in [ipc.MERGE_MODE_AND, ipc.MERGE_MODE_OR]:
images = []
for node in self.nodes:
if not node.enabled:
continue
if node.is_group and not node.matches_filters:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Group {node.name} did not match filters, skipped",
data=None,
is_progress=False,
)
continue
res = node.execute(**kwargs)
if self.stop_processing:
return res
if res:
if node.output_type == ipc.IO_IMAGE:
images.append(res["image"])
elif node.output_type == ipc.IO_MASK:
images.append(res["mask"])
elif node.output_type == ipc.IO_ROI:
add_roi(
wrapper=wrapper,
roi_list=rois,
roi=res.get("roi", None),
node=node,
)
else:
self.last_result["outcome"] = False
if node.uuid == target_module:
self.stop_processing = True
return node.last_result
if self.merge_mode == ipc.MERGE_MODE_AND:
res = wrapper.multi_and(images)
else:
res = wrapper.multi_or(images)
if self.output_type == ipc.IO_IMAGE:
wrapper.current_image = res
is_current_image_changed = True
elif self.output_type == ipc.IO_MASK:
wrapper.mask = res
else:
self.do_call_back(
call_back=call_back,
res=logging.ERROR,
msg=f'Invalid output type "{self.output_type}" for merge mode "{self.merge_mode}" in {self.name}',
data=None,
is_progress=False,
)
self.last_result["outcome"] = False
else:
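            # Unknown merge modes fall through without doing any work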
pass
if only_rois and rois:
self.last_result["roi"] = rois
self.last_result["image"] = wrapper.draw_rois(
img=wrapper.current_image, rois=rois
)
elif is_current_image_changed or (len(wrapper.image_list) == 0):
self.last_result["image"] = wrapper.current_image
else:
self.last_result["image"] = wrapper.image_list[-1]["image"]
self.last_result["mask"] = wrapper.mask
self.last_result["data"] = wrapper.csv_data_holder.data_list
if self.is_root:
if self.parent.settings.mosaic.enabled:
self.root.parent.mosaic = wrapper.build_mosaic(
image_names=self.parent.settings.mosaic.images,
images_dict=self.parent.stored_mosaic_images,
)
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Pipeline processed in {format_time(timer() - before)}",
data={
"name": f"{wrapper.luid}_final_mosaic",
"image": self.root.parent.mosaic,
"data": self.last_result["data"],
"plant_name": "unknown" if wrapper is None else wrapper.plant,
"luid": wrapper.luid,
},
force_call_back=True,
is_progress=False,
)
else:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Processed {wrapper.luid} in {format_time(timer() - before)}",
data=self
if self.root.parent.show_group_result or self.root.parent.silent
else None,
force_call_back=True,
is_progress=False,
)
elif not target_module:
self.do_call_back(
call_back=call_back,
res=logging.INFO,
msg=f"Successfully processed {self.name}, merge mode: {self.merge_mode} in {format_time(timer() - before)}",
data=self if self.root.parent.show_group_result else None,
is_progress=False,
)
return self.last_result
def copy(self, parent):
return GroupNode(
parent=parent,
merge_mode=self.merge_mode,
name=self.name,
source=self.source,
nodes=[node.copy(parent=self) for node in self.nodes],
execute_filters=self.execute_filters,
)
def to_code(self, indent: int):
pass
def get_parent(self, item):
for node in self.nodes:
if hasattr(node, "uuid"):
if item.uuid == node.uuid:
return self
elif isinstance(node, GroupNode):
parent = node.get_parent(item)
if parent is not None:
return parent
return None
def to_json(self):
return dict(
node_type="group",
merge_mode=self.merge_mode,
name=self.name,
uuid=self.uuid,
source=self.source,
no_delete=self.no_delete,
nodes=[node.to_json() for node in self.nodes],
execute_filters=self.execute_filters,
)
@classmethod
def from_json(cls, parent, json_data: dict):
res = cls(
parent=parent,
merge_mode=json_data["merge_mode"],
name=json_data["name"],
uuid=json_data["uuid"],
no_delete=json_data["no_delete"],
source=json_data["source"],
execute_filters=json_data.get(
"execute_filters", cls.default_execution_filters
),
)
for node in json_data["nodes"]:
if node["node_type"] == "module":
res.nodes.append(ModuleNode.from_json(parent=res, json_data=node))
elif node["node_type"] == "group":
res.nodes.append(GroupNode.from_json(parent=res, json_data=node))
else:
eh.log_data(
log_msg=f"Unknown node type: {node['node_type']}",
log_level=logging.ERROR,
target_logger=logger,
)
return res
def modules(self):
return [node for node in self.nodes if isinstance(node, ModuleNode)]
def groups(self):
return [node for node in self.nodes if isinstance(node, GroupNode)]
def module(self, index) -> ModuleNode:
lst = self.modules()
if len(lst) > index:
return lst[index]
else:
return None
def group(self, index) -> Node:
lst = self.groups()
if len(lst) > index:
return lst[index]
else:
return None
def iter_items(self, types: tuple = ("groups", "modules")):
def parse_children_(parent):
for node in parent.nodes:
if (("groups" in types) and isinstance(node, GroupNode)) or (
("modules" in types) and isinstance(node, ModuleNode)
):
yield node
if isinstance(node, GroupNode):
yield from parse_children_(node)
if (("groups" in types) and isinstance(self, GroupNode)) or (
("modules" in types) and isinstance(self, ModuleNode)
):
yield self
yield from parse_children_(self)
def as_pivot_list(self, index, types: tuple = ("groups", "modules")) -> dict:
nodes = [node for node in self.iter_items(types)]
if index not in nodes:
return {}
res = {"before": [], "pivot": index, "after": []}
matched_uuid = False
for node in nodes:
if node.uuid == index.uuid:
matched_uuid = True
continue
if matched_uuid:
res["after"].append(node)
else:
res["before"].append(node)
return res
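    # Example: for items [a, b, c] and index=b, the result is
    # {"before": [a], "pivot": b, "after": [c]} (nodes are split on uuid equality).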
def find_by_uuid(self, uuid):
if self.uuid == uuid:
return self
for node in self.iter_items():
if node.uuid == uuid:
return node
else:
return None
def find_by_name(self, name):
for node in self.iter_items():
if node.name == name:
return node
else:
return None
def check_input(self, node) -> bool:
if isinstance(node, GroupNode):
if self.merge_mode in [ipc.MERGE_MODE_AND, ipc.MERGE_MODE_OR]:
            has_image, has_mask = False, False
            # Iterate with `child` so the `node` argument stays available below
            for child in self.nodes:
                if child.output_type in [ipc.IO_DATA, ipc.IO_NONE, ipc.IO_ROI]:
                    return False
                elif child.output_type == ipc.IO_IMAGE:
                    has_image = True
                elif child.output_type == ipc.IO_MASK:
                    has_mask = True
                else:
                    return False
            if has_image and has_mask:
                return False
if isinstance(node, GroupNode) and node.module_count > 0:
n = node.module(0)
else:
n = node
if isinstance(n, GroupNode):
return True
        pivot_list = self.as_pivot_list(index=n, types=("modules",))
if not pivot_list:
return False
if len(pivot_list["before"]) > 0:
            if n.input_type == ipc.IO_DATA:
                needed_output = ipc.IO_DATA
            elif n.input_type == ipc.IO_IMAGE:
                return True
            elif n.input_type == ipc.IO_MASK:
                needed_output = ipc.IO_MASK
            elif n.input_type == ipc.IO_NONE:
                return True
            elif n.input_type == ipc.IO_ROI:
                needed_output = ipc.IO_ROI
            else:
                return False
            for node in pivot_list["before"]:
                if node.output_type == needed_output:
                    return True
            return False
        else:
            return n.input_type == ipc.IO_IMAGE
def invalidate(self, node):
pivot_list = self.as_pivot_list(index=node)
node.last_result = {}
for node in pivot_list["after"]:
if isinstance(node, ModuleNode):
node.invalidate()
elif isinstance(node, GroupNode):
node.last_result = {}
@property
def input_type(self):
if len(self.nodes) == 0:
return ipc.IO_NONE
else:
return self.nodes[0].input_type
@property
def output_type(self):
if len(self.nodes) == 0:
return ipc.IO_NONE
else:
return self.nodes[-1].output_type
@property
def node_count(self):
return len(self.nodes)
@property
def group_count(self):
return len(self.groups())
@property
def module_count(self):
return len(self.modules())
@property
def enabled(self):
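        # Tri-state result: 0 = nothing enabled, 1 = partially enabled, 2 = fully enabled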
if self.node_count == 0:
return 0
else:
has_enabled = False
has_disabled = False
for node in self.nodes:
if isinstance(node, GroupNode):
enabled_state = node.enabled
if enabled_state == 0:
has_disabled = True
elif enabled_state == 1:
return 1
elif enabled_state == 2:
has_enabled = True
elif isinstance(node, ModuleNode):
if node.enabled:
has_enabled = True
else:
has_disabled = True
if has_enabled and has_disabled:
return 1
return 2 if has_enabled else 0
@enabled.setter
def enabled(self, value):
for node in self.nodes:
node.enabled = value
@property
def matches_filters(self):
wrapper = self.root.parent.wrapper
for k, v in self.execute_filters.items():
current_filter = [filter_ for filter_ in IptParam.decode_string(v) if filter_]
if not current_filter:
continue
if not hasattr(wrapper, k) or getattr(wrapper, k) not in current_filter:
return False
return True
class LoosePipeline(object):
def __init__(self, **kwargs):
self.root: GroupNode = GroupNode(
merge_mode=ipc.MERGE_MODE_CHAIN, name="Pipeline", parent=self
)
self.target_data_base = None
self.settings: PipelineSettings = PipelineSettings(pipeline=self)
self.last_wrapper_luid = ""
self.use_cache = True
self.image_output_path = ""
self.name = ""
self.description = "Please insert description"
self.stored_mosaic_images = {}
self._stop_processing = False
self.set_template(kwargs.get("template", None))
self.silent = False
self.mosaic = None
self.wrapper: BaseImageProcessor = None
self.error_level = logging.INFO
self.set_callbacks()
def __repr__(self):
return json.dumps(self.to_json(), indent=2, sort_keys=False)
def __str__(self):
return f"Pipeline {self.name}"
def set_template(self, template):
if isinstance(template, str):
if template == "default":
self.root.add_group(
name="Fix image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="source",
uuid="fix_image",
)
self.root.add_group(
name="Pre process image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(0).uuid,
uuid="pre_process_image",
)
self.root.add_group(
name="Build mask",
merge_mode=ipc.MERGE_MODE_AND,
source=self.root.group(1).uuid,
uuid="build_mask",
)
self.root.add_group(
name="Clean mask",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(1).uuid,
uuid="clean_mask",
)
self.root.add_group(
name="Extract features",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(0).uuid,
uuid="extract_features",
)
elif template == "legacy":
self.root.add_group(
name="ROIs from raw image",
merge_mode=ipc.MERGE_MODE_NONE,
source="source",
uuid="roi_raw",
)
self.root.add_group(
name="Fix image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="source",
uuid="fix_image",
)
self.root.add_group(
name="Pre process image",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="fix_image",
uuid="pre_process_image",
)
self.root.add_group(
name="ROIs from raw pre processed image",
merge_mode=ipc.MERGE_MODE_NONE,
source="pre_process_image",
uuid="roi_pre_processed",
)
self.root.add_group(
name="Build mask",
merge_mode=ipc.MERGE_MODE_AND,
source="pre_process_image",
uuid="build_mask",
)
self.root.add_group(
name="Apply ROIS",
merge_mode=ipc.MERGE_MODE_CHAIN,
source=self.root.group(1).uuid,
uuid="apply_roi",
)
self.root.add_group(
name="Clean mask",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="pre_process_image",
uuid="clean_mask",
)
self.root.add_group(
name="Assert mask position",
merge_mode=ipc.MERGE_MODE_NONE,
source=self.root.group(1).uuid,
uuid="assert_mask_position",
)
self.root.add_group(
name="Extract features",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="fix_image",
uuid="extract_features",
)
self.root.add_group(
name="Build images",
merge_mode=ipc.MERGE_MODE_CHAIN,
source="fix_image",
uuid="build_images",
)
def add_module(self, operator, target_group: str = "") -> bool:
if not target_group:
target_group = self.root
else:
target_group = self.root.find_by_uuid(target_group)
if target_group is None or operator is None:
return False
target_group.add_module(tool=operator)
return True
def execute(
self,
src_image: Union[str, BaseImageProcessor],
silent_mode: bool = False,
additional_data: dict = {},
write_data: bool = False,
target_data_base=None,
overwrite_data: bool = False,
store_images: bool = True,
options=None,
**kwargs,
):
# Override settings
self.error_level = logging.INFO
self.stop_processing = False
self.silent = silent_mode
self.text_result = ""
# Build/retrieve wrapper
if isinstance(src_image, str):
self.wrapper = BaseImageProcessor(
src_image,
options=options,
database=target_data_base,
)
elif isinstance(src_image, BaseImageProcessor):
self.wrapper = src_image
else:
logger.error(f"Unknown source {str(src_image)}")
self.error_level = logging.ERROR
self.text_result = "Source error"
return False
# Check wrapper
if self.wrapper is None:
if isinstance(src_image, str):
logger.error(f"Unable to build wrapper from file '{src_image}''")
else:
logger.error("Unable to retrieve wrapper.")
self.error_level = logging.ERROR
self.text_result = "Wrapper error"
return False
elif os.path.isfile(self.wrapper.csv_file_path) and overwrite_data is False:
self.error_level = logging.INFO
self.text_result = "Skipped"
return True
elif not self.wrapper.check_source_image():
if isinstance(src_image, str):
logger.error(f"Image seems to be corrupted '{src_image}''")
else:
logger.error("Image seems to be corrupted.")
self.text_result = "Corrupted image"
self.error_level = logging.ERROR
return False
# Override wrapper settings
if self.last_wrapper_luid != self.wrapper.luid:
self.invalidate()
self.last_wrapper_luid = self.wrapper.luid
self.wrapper.lock = True
self.wrapper.target_database = target_data_base
self.wrapper.store_images = store_images and (
self.root.parent.debug_mode or bool(kwargs.get("target_module", ""))
)
for module in self.root.iter_items(types=("modules",)):
self.wrapper.forced_storage_images_list.extend(module.tool.required_images)
if self.image_output_path:
pass
elif options is None:
self.image_output_path = ipso_folders.get_path(
key="image_output",
force_creation=False,
)
else:
self.image_output_path = self.wrapper.dst_path
# Prepare data holder
self.wrapper.init_data_holder()
# Execute pipeline
self.root.execute(**kwargs)
# Update data with forced key value pairs
for k, v in additional_data.items():
self.wrapper.csv_data_holder.update_csv_value(key=k, value=v, force_pair=True)
if write_data is True:
try:
with open(self.wrapper.csv_file_path, "w", newline="") as csv_file_:
wr = csv.writer(csv_file_, quoting=csv.QUOTE_NONE)
wr.writerow(self.wrapper.csv_data_holder.header_to_list())
wr.writerow(self.wrapper.csv_data_holder.data_to_list())
except Exception as e:
logger.exception(f"Failed to write image data because {repr(e)}")
index = kwargs.get("index", -1)
total = kwargs.get("total", -1)
if index >= 0 and total >= 0:
eh.log_data(
log_msg=(
f'{"OK" if self.error_level < self.stop_on else "FAIL"} - '
+ f"{(index + 1):{len(str(total))}d}/{total} >>> "
+ self.wrapper.name
),
log_level=self.error_level,
target_logger=logger,
)
index = kwargs.get("index", -1)
total = kwargs.get("total", -1)
if index >= 0 and total >= 0:
eh.log_data(
log_msg=(
f'{"OK" if self.error_level < self.stop_on else "FAIL"} - '
+ f"{(index + 1):{len(str(total))}d}/{total} >>> "
+ self.wrapper.name
),
log_level=self.error_level,
target_logger=logger,
)
return self.error_level < self.stop_on
def targeted_callback(self, param: IptParam):
if param.name == "debug_mode":
if self.root.nodes:
self.root.invalidate(self.root)
else:
print(f"{param.name} was set")
def set_callbacks(self):
p = self.settings.find_by_name(name="debug_mode")
if p is not None:
p.on_change = self.targeted_callback
def invalidate(self):
self.stored_mosaic_images = {}
for node in self.root.iter_items():
if isinstance(node, ModuleNode):
node.invalidate()
elif isinstance(node, GroupNode):
node.last_result = {}
def save(self, file_name: str) -> bool:
try:
with open(file_name, "w") as f:
json.dump(self.to_json(), f, indent=2)
except Exception as e:
logger.exception(
f'Failed to save pipeline "{repr(e)}"',
)
return False
else:
return True
@classmethod
def load(cls, file_name: str):
with open(file_name, "r") as f:
return cls.from_json(json_data=json.load(f))
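    # Round-trip sketch (path is illustrative):
    #   pipeline.save("my_pipeline.json")
    #   restored = LoosePipeline.load("my_pipeline.json")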
def copy(self):
return self.__class__.from_json(self.to_json())
def get_parent(self, item: Union[GroupNode, ModuleNode]) -> GroupNode:
return self.root.get_parent(item=item)
def remove_item(self, item: Union[GroupNode, ModuleNode]):
self.root.remove_node(item)
def to_code(self):
pass
def to_json(self):
save_dict = {
"title": "IPSO Phen pipeline V2",
"name": self.name,
"description": self.description,
"date": dt.now().strftime("%Y_%b_%d_%H-%M-%S"),
"version": last_script_version,
}
# Add settings
save_dict["settings"] = self.settings.params_to_dict()
# Add root node
save_dict["Pipeline"] = self.root.to_json()
return save_dict
@classmethod
def from_json(cls, json_data: dict):
if json_data["title"].lower() == "ipso phen pipeline v2":
res = cls()
res.name = json_data["name"]
res.description = json_data["description"]
res.settings = PipelineSettings(pipeline=res, **json_data["settings"])
res.root = GroupNode.from_json(parent=res, json_data=json_data["Pipeline"])
elif json_data["title"].lower() == "ipso phen pipeline":
res = cls(template="default_groups")
tmp = IptStrictPipeline.from_json(json_data=json_data)
# Import basic data
res.name = tmp.name
res.description = "Pipeline imported from old format, please check data"
# Import settings
for setting in res.settings.gizmos:
p = tmp.settings.find_by_name(setting.name)
if p is not None:
setting.value = p.value
# create groups
res.set_template(template="legacy")
# Import nodes & modules
for uuid, kinds in zip(
[
"roi_raw",
"fix_image",
"pre_process_image",
"roi_pre_processed",
"build_mask",
],
[
ipc.ToolFamily.ROI_RAW_IMAGE_STR,
[ipc.ToolFamily.WHITE_BALANCE, ipc.ToolFamily.EXPOSURE_FIXING],
ipc.ToolFamily.PRE_PROCESSING,
ipc.ToolFamily.ROI_PP_IMAGE_STR,
ipc.ToolFamily.THRESHOLD,
],
):
src_group = tmp.get_operators(constraints={"kind": kinds})
dst_group = res.root.find_by_uuid(uuid=uuid)
for tool_dict in src_group:
dst_group.add_module(
tool=tool_dict["tool"].copy(),
enabled=tool_dict["enabled"],
uuid=tool_dict["uuid"],
)
res.root.find_by_uuid(uuid="build_mask").merge_mode = (
ipc.MERGE_MODE_AND
if tmp.merge_method == "multi_and"
else ipc.MERGE_MODE_OR
)
rois = tmp.get_operators(
constraints={
"kind": [
ipc.ToolFamily.ROI_PP_IMAGE_STR,
ipc.ToolFamily.ROI_RAW_IMAGE_STR,
]
}
)
dst_group = res.root.find_by_uuid(uuid="apply_roi")
for tool_dict in rois:
ipt = tool_dict["tool"]
roi_type = ipt.get_value_of("roi_type")
if roi_type not in [
"keep",
"delete",
"erode",
"dilate",
"open",
"close",
]:
continue
dst_group.add_module(
tool=get_ipt_class(class_name="IptApplyRoi")(
roi_names=ipt.get_value_of("roi_name"),
roi_selection_mode="all_named",
roi_type=roi_type,
input_source="mask",
output_mode="mask",
)
)
dst_group = res.root.find_by_uuid(uuid="assert_mask_position")
for tool_dict in rois:
ipt = tool_dict["tool"]
if ipt.get_value_of("roi_type") not in ["enforce"]:
continue
dst_group.add_module(
tool=get_ipt_class(class_name="IptAssertMaskPosition")(
roi_names=ipt.get_value_of("roi_name"),
roi_selection_mode="all_named",
)
)
for uuid, kinds in zip(
["clean_mask", "extract_features", "build_images"],
[
ipc.ToolFamily.MASK_CLEANUP,
ipc.ToolFamily.FEATURE_EXTRACTION,
ipc.ToolFamily.IMAGE_GENERATOR,
],
):
src_group = tmp.get_operators(constraints={"kind": kinds})
dst_group = res.root.find_by_uuid(uuid=uuid)
for tool_dict in src_group:
dst_group.add_module(
tool=tool_dict["tool"].copy(),
enabled=tool_dict["enabled"],
uuid=tool_dict["uuid"],
)
else:
res = cls()
res.name = (
f'Failed to load unknown pipeline type "{json_data["title"].lower()}"'
)
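        # Presumably: stop_on below logging.DEBUG (10) would make every message fatal,
        # so it is raised to 35, between logging.WARNING (30) and logging.ERROR (40)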
if res.stop_on < 10:
res.stop_on = 35
res.set_callbacks()
return res
def update_error_level(self, error_level):
self.error_level = max(self.error_level, error_level)
@property
def node_count(self):
return len(self.root.nodes)
@property
def threshold_only(self):
return self.settings.get_value_of("threshold_only") == 1
@threshold_only.setter
def threshold_only(self, value):
self.settings.set_value_of(
key="threshold_only", value=1 if value is True else 0, update_widgets=False
)
@property
def debug_mode(self):
return self.settings.get_value_of("debug_mode") == 1
@debug_mode.setter
def debug_mode(self, value):
self.settings.set_value_of(
key="debug_mode", value=1 if value is True else 0, update_widgets=False
)
@property
def show_tool_result(self):
return self.settings.get_value_of("show_tool_result") == 1
@show_tool_result.setter
def show_tool_result(self, value):
self.settings.set_value_of(
key="show_tool_result", value=1 if value is True else 0, update_widgets=False
)
@property
def show_group_result(self):
return self.settings.get_value_of("show_group_result") == 1
@show_group_result.setter
def show_group_result(self, value):
self.settings.set_value_of(
key="show_group_result", value=1 if value is True else 0, update_widgets=False
)
@property
def tool_group_name_watermark(self):
return self.settings.get_value_of("tool_group_name_watermark") == 1
@tool_group_name_watermark.setter
def tool_group_name_watermark(self, value):
self.settings.set_value_of(
key="tool_group_name_watermark",
value=1 if value is True else 0,
update_widgets=False,
)
@property
def stop_on(self) -> int:
return self.settings.get_value_of("stop_on")
@stop_on.setter
def stop_on(self, value: int):
self.settings.set_value_of("stop_on", value)
@property
def last_image(self):
return self.settings.get_value_of("last_image")
@last_image.setter
def last_image(self, value):
self.settings.set_value_of("last_image", value)
@property
def allow_step_mosaics(self):
return self.settings.get_value_of("allow_step_mosaics")
@allow_step_mosaics.setter
def allow_step_mosaics(self, value):
self.settings.set_value_of("allow_step_mosaics", value)
@property
def show_source_image(self):
return self.settings.get_value_of("show_source_image")
@show_source_image.setter
def show_source_image(self, value):
self.settings.set_value_of("show_source_image", value)
@property
def stop_processing(self):
return self._stop_processing or self.error_level >= self.stop_on
@stop_processing.setter
def stop_processing(self, value):
self._stop_processing = value
| true
| true
|
1c413eede860299c08955cb73bada66067d1d4f6
| 3,685
|
py
|
Python
|
configs/ttflevelnet/ttflnet_r18_enh_mdcns8_1224_b12d2_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttflevelnet/ttflnet_r18_enh_mdcns8_1224_b12d2_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttflevelnet/ttflnet_r18_enh_mdcns8_1224_b12d2_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFLevelHead',
inplanes=(64, 128, 256, 512),
planes=(256, 128, 64),
down_ratio_b1=8,
down_ratio_b2=4,
hm_head_channels=(128, 64),
wh_head_channels=(32, 32),
hm_head_conv_num=(2, 2),
wh_head_conv_num=(2, 2),
num_classes=81,
wh_scale_factor_b1=8.,
wh_scale_factor_b2=8.,
shortcut_cfg=(1, 2, 3),
extra_shortcut_cfg=3,
alpha=0.6,
beta=0.6,
max_objs=128,
hm_weight_b1=2.2,
wh_weight_b1=11.,
hm_weight_b2=1.,
wh_weight_b2=5.,
b1_min_length=64,
b2_max_length=64,
mdcn_before_s8=True,
inf_branch=['b1', 'b2'],
use_simple_nms=True,
# get_bboxesv2=True,
# consider_score=True,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv2net_r18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
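# Usage sketch (assuming a standard mmdetection checkout; exact flags may vary):
#   python tools/train.py configs/ttflevelnet/ttflnet_r18_enh_mdcns8_1224_b12d2_1x.py
#   bash tools/dist_train.sh configs/ttflevelnet/ttflnet_r18_enh_mdcns8_1224_b12d2_1x.py 8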
| 29.717742
| 86
| 0.621981
|
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFLevelHead',
inplanes=(64, 128, 256, 512),
planes=(256, 128, 64),
down_ratio_b1=8,
down_ratio_b2=4,
hm_head_channels=(128, 64),
wh_head_channels=(32, 32),
hm_head_conv_num=(2, 2),
wh_head_conv_num=(2, 2),
num_classes=81,
wh_scale_factor_b1=8.,
wh_scale_factor_b2=8.,
shortcut_cfg=(1, 2, 3),
extra_shortcut_cfg=3,
alpha=0.6,
beta=0.6,
max_objs=128,
hm_weight_b1=2.2,
wh_weight_b1=11.,
hm_weight_b2=1.,
wh_weight_b2=5.,
b1_min_length=64,
b2_max_length=64,
mdcn_before_s8=True,
inf_branch=['b1', 'b2'],
use_simple_nms=True,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
log_config = dict(interval=20)
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv2net_r18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| true
| true
|
1c413f0c6c48d231c8c9a0d72162ab5fbe5a184d
| 972
|
py
|
Python
|
api/views.py
|
antonybholmes/edbw-django-app
|
2268fa7f983347742ee55145269ef1222776a838
|
[
"MIT"
] | null | null | null |
api/views.py
|
antonybholmes/edbw-django-app
|
2268fa7f983347742ee55145269ef1222776a838
|
[
"MIT"
] | null | null | null |
api/views.py
|
antonybholmes/edbw-django-app
|
2268fa7f983347742ee55145269ef1222776a838
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
import base64
def about(request):
return JsonResponse({"name":"edbw","version":"10.0","copyright":"Copyright (C) 2014-2019 Antony Holmes"}, safe=False)
def json_page_resp(name, page, paginator):
"""
Returns a standardized page response
"""
page_rows = paginator.get_page(page)
return JsonResponse({'page':page, 'pages':paginator.num_pages, name:[x['json'] for x in page_rows], 'size':len(page_rows)}, safe=False)
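# Usage sketch (model and wiring are illustrative, not part of this app):
#   from django.core.paginator import Paginator
#   paginator = Paginator(Gene.objects.values('json'), 25)
#   return json_page_resp('genes', page=1, paginator=paginator)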
def json_resp(rows):
"""
For rows containing a json field.
"""
return JsonResponse([x['json'] for x in rows], safe=False)
def test(request):
print(request.META)
print(base64.b64decode(request.headers['Authorization'][6:]))
print(base64.b64decode(request.headers['Authorization'].split(' ')[1]).decode('utf-8').split(':'))
return JsonResponse(request.headers, safe=False)
| 29.454545
| 139
| 0.691358
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
import base64
def about(request):
return JsonResponse({"name":"edbw","version":"10.0","copyright":"Copyright (C) 2014-2019 Antony Holmes"}, safe=False)
def json_page_resp(name, page, paginator):
page_rows = paginator.get_page(page)
return JsonResponse({'page':page, 'pages':paginator.num_pages, name:[x['json'] for x in page_rows], 'size':len(page_rows)}, safe=False)
def json_resp(rows):
return JsonResponse([x['json'] for x in rows], safe=False)
def test(request):
print(request.META)
print(base64.b64decode(request.headers['Authorization'][6:]))
print(base64.b64decode(request.headers['Authorization'].split(' ')[1]).decode('utf-8').split(':'))
return JsonResponse(request.headers, safe=False)
| true
| true
|
1c413f15125157ec009507190c422d87a08b1a46
| 548
|
py
|
Python
|
pettingzoo/utils/wrappers/capture_stdout.py
|
vstark21/PettingZoo
|
0ebd8fb540e195f9dd91d996f190e9a89dedcf26
|
[
"Apache-2.0"
] | 3
|
2021-10-14T19:53:19.000Z
|
2022-02-22T01:14:44.000Z
|
pettingzoo/utils/wrappers/capture_stdout.py
|
vstark21/PettingZoo
|
0ebd8fb540e195f9dd91d996f190e9a89dedcf26
|
[
"Apache-2.0"
] | null | null | null |
pettingzoo/utils/wrappers/capture_stdout.py
|
vstark21/PettingZoo
|
0ebd8fb540e195f9dd91d996f190e9a89dedcf26
|
[
"Apache-2.0"
] | 1
|
2022-02-22T11:06:40.000Z
|
2022-02-22T11:06:40.000Z
|
from ..capture_stdout import capture_stdout
from .base import BaseWrapper
class CaptureStdoutWrapper(BaseWrapper):
def __init__(self, env):
super().__init__(env)
self.metadata['render.modes'].append("ansi")
def render(self, mode="human"):
if mode == "human":
super().render()
elif mode == "ansi":
with capture_stdout() as stdout:
super().render()
val = stdout.getvalue()
return val
def __str__(self):
return str(self.env)
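# Usage sketch (environment choice is illustrative):
#   from pettingzoo.classic import tictactoe_v3
#   env = CaptureStdoutWrapper(tictactoe_v3.env())
#   env.reset()
#   text_frame = env.render(mode="ansi")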
| 23.826087
| 52
| 0.574818
|
from ..capture_stdout import capture_stdout
from .base import BaseWrapper
class CaptureStdoutWrapper(BaseWrapper):
def __init__(self, env):
super().__init__(env)
self.metadata['render.modes'].append("ansi")
def render(self, mode="human"):
if mode == "human":
super().render()
elif mode == "ansi":
with capture_stdout() as stdout:
super().render()
val = stdout.getvalue()
return val
def __str__(self):
return str(self.env)
| true
| true
|
1c413f828e94e3b740b15c03f550800b97795557
| 23,545
|
py
|
Python
|
uml/__init__.py
|
palibhasataolamang/paml
|
f3f2c113f0925e42557820416199bd6eaaeb091c
|
[
"MIT"
] | null | null | null |
uml/__init__.py
|
palibhasataolamang/paml
|
f3f2c113f0925e42557820416199bd6eaaeb091c
|
[
"MIT"
] | null | null | null |
uml/__init__.py
|
palibhasataolamang/paml
|
f3f2c113f0925e42557820416199bd6eaaeb091c
|
[
"MIT"
] | null | null | null |
import os
import posixpath
from collections import Counter
from typing import List, Set, Iterable
from sbol_factory import SBOLFactory, UMLFactory
import sbol3
# Load ontology and create uml submodule
SBOLFactory('uml_submodule',
posixpath.join(os.path.dirname(os.path.realpath(__file__)),
'uml.ttl'),
'http://bioprotocols.org/uml#')
# Import submodule symbols into top-level uml module
from uml_submodule import *
from .uml_graphviz import *
# Workaround for pySBOL3 issue #231: should be applied to every iteration on a collection of SBOL objects
# TODO: delete after resolution of pySBOL3 issue #231
def id_sort(i: iter):
sortable = list(i)
sortable.sort(key=lambda x: x.identity if isinstance(x, sbol3.Identified) else x)
return sortable
###########################################
# Define extension methods for ValueSpecification
# TODO: move constants into ontology after resolution of https://github.com/SynBioDex/sbol_factory/issues/14
PARAMETER_IN = 'http://bioprotocols.org/uml#in'
PARAMETER_OUT = 'http://bioprotocols.org/uml#out'
def literal(value, reference: bool = False) -> LiteralSpecification:
"""Construct a UML LiteralSpecification based on the value of the literal passed
Parameters
----------
value: the value to embed as a literal
reference: if true, use a reference for a non-TopLevel SBOL rather than embedding as a child object
Returns
-------
LiteralSpecification of the appropriate type for the value
"""
if isinstance(value, LiteralSpecification):
return literal(value.value, reference) # if it's a literal, unwrap and rebuild
elif value is None:
return LiteralNull()
elif isinstance(value, str):
return LiteralString(value=value)
    elif isinstance(value, bool):  # bool subclasses int, so it must be tested first
        return LiteralBoolean(value=value)
    elif isinstance(value, int):
        return LiteralInteger(value=value)
elif isinstance(value, float):
return LiteralReal(value=value)
elif isinstance(value, sbol3.TopLevel) or (reference and isinstance(value, sbol3.Identified)):
return LiteralReference(value=value)
elif isinstance(value, sbol3.Identified):
return LiteralIdentified(value=value)
else:
raise ValueError(f'Don\'t know how to make literal from {type(value)} "{value}"')
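# Dispatch examples (values are illustrative): literal(3) -> LiteralInteger,
# literal('x') -> LiteralString, literal(True) -> LiteralBoolean (bool is tested
# before int above, since bool subclasses int), literal(None) -> LiteralNull.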
###########################################
# Define extension methods for Behavior
def behavior_add_parameter(self, name: str, param_type: str, direction: str, optional: bool = False,
default_value: ValueSpecification = None) -> OrderedPropertyValue:
"""Add a Parameter for this Behavior; usually not called directly
Note: Current assumption is that cardinality is either [0..1] or 1
:param name: name of the parameter, which will also be used for pins
:param param_type: URI specifying the type of object that is expected for this parameter
:param direction: should be in or out
:param optional: True if the Parameter is optional; default is False
:param default_value: must be specified if Parameter is optional
:return: Parameter that has been added
"""
param = Parameter(name=name, type=param_type, direction=direction, is_ordered=True, is_unique=True)
ordered_param = OrderedPropertyValue(index=len(self.parameters), property_value=param)
print(param, ordered_param)
print(ordered_param.__class__.__mro__)
self.parameters.append(ordered_param)
param.upper_value = literal(1) # all parameters are assumed to have cardinality [0..1] or 1 for now
if optional:
param.lower_value = literal(0)
else:
param.lower_value = literal(1)
if default_value:
param.default_value = default_value
return ordered_param
Behavior.add_parameter = behavior_add_parameter # Add to class via monkey patch
def behavior_add_input(self, name: str, param_type: str, optional: bool = False,
default_value: ValueSpecification = None) -> OrderedPropertyValue:
"""Add an input Parameter for this Behavior
Note: Current assumption is that cardinality is either [0..1] or 1
:param name: name of the parameter, which will also be used for pins
:param param_type: URI specifying the type of object that is expected for this parameter
:param optional: True if the Parameter is optional; default is False
:param default_value: default value for this parameter
:return: Parameter that has been added
"""
return self.add_parameter(name, param_type, PARAMETER_IN, optional, default_value)
Behavior.add_input = behavior_add_input # Add to class via monkey patch
def behavior_add_output(self, name, param_type) -> OrderedPropertyValue:
"""Add an output Parameter for this Behavior
:param name: name of the parameter, which will also be used for pins
:param param_type: URI specifying the type of object that is expected for this parameter
:return: Parameter that has been added
"""
return self.add_parameter(name, param_type, PARAMETER_OUT)
Behavior.add_output = behavior_add_output # Add to class via monkey patch
def behavior_get_inputs(self) -> Iterable[Parameter]:
"""Return all Parameters of type input for this Behavior
Note: assumes that type is all either in or out
Returns
-------
Iterator over Parameters
"""
return (p for p in self.parameters if p.property_value.direction == PARAMETER_IN)
Behavior.get_inputs = behavior_get_inputs # Add to class via monkey patch
def behavior_get_input(self, name) -> Parameter:
"""Return a specific input Parameter for this Behavior
Note: assumes that type is all either in or out
Returns
-------
Parameter, or Value error
"""
    found = [p for p in self.get_inputs() if p.property_value.name == name]
if len(found) == 0:
raise ValueError(f'Behavior {self.identity} has no input parameter named {name}')
elif len(found) > 1:
raise ValueError(f'Behavior {self.identity} has multiple input parameters named {name}')
else:
return found[0]
Behavior.get_input = behavior_get_input # Add to class via monkey patch
def behavior_get_required_inputs(self):
"""Return all required Parameters of type input for this Behavior
Note: assumes that type is all either in or out
Returns
-------
Iterator over Parameters
"""
return (p for p in self.get_inputs() if p.property_value.lower_value.value > 0)
Behavior.get_required_inputs = behavior_get_required_inputs # Add to class via monkey patch
def behavior_get_outputs(self):
"""Return all Parameters of type output for this Behavior
Note: assumes that type is all either in or out
Returns
-------
Iterator over Parameters
"""
return (p for p in self.parameters if p.property_value.direction == PARAMETER_OUT)
Behavior.get_outputs = behavior_get_outputs # Add to class via monkey patch
def behavior_get_output(self, name) -> Parameter:
    """Return a specific output Parameter for this Behavior
    Note: assumes that type is all either in or out
    Returns
    -------
    Parameter, or Value error
    """
    found = [p for p in self.get_outputs() if p.property_value.name == name]
if len(found) == 0:
raise ValueError(f'Behavior {self.identity} has no output parameter named {name}')
elif len(found) > 1:
raise ValueError(f'Behavior {self.identity} has multiple output parameters named {name}')
else:
return found[0]
Behavior.get_output = behavior_get_output # Add to class via monkey patch
def behavior_get_required_outputs(self):
"""Return all required Parameters of type output for this Behavior
Note: assumes that type is all either in or out
Returns
-------
Iterator over Parameters
"""
return (p for p in self.get_outputs() if p.property_value.lower_value.value > 0)
Behavior.get_required_outputs = behavior_get_required_outputs # Add to class via monkey patch
###########################################
# Define extension methods for ActivityNode
def activitynode_unpin(self: ActivityNode) -> ActivityNode:
"""Find the root node for an ActivityNode: either itself if a Pin, otherwise the owning Action
Parameters
----------
self: ActivityNode
Returns
-------
self if not a Pin, otherwise the owning Action
"""
if isinstance(self,Pin):
action = self.get_parent()
if not isinstance(action,Action):
raise ValueError(f'Parent of {self.identity} should be Action, but found {type(action)} instead')
return action
else:
return self
ActivityNode.unpin = activitynode_unpin # Add to class via monkey patch
###########################################
# Define extension methods for CallBehaviorAction
def call_behavior_action_input_pin(self, pin_name: str):
"""Find an input pin on the action with the specified name
:param pin_name:
:return: Pin with specified name
"""
pin_set = {x for x in self.inputs if x.name == pin_name}
if len(pin_set) == 0:
raise ValueError(f'Could not find input pin named {pin_name}')
if len(pin_set) > 1:
raise ValueError(f'Found more than one input pin named {pin_name}')
return pin_set.pop()
CallBehaviorAction.input_pin = call_behavior_action_input_pin # Add to class via monkey patch
def call_behavior_action_output_pin(self, pin_name: str):
"""Find an output pin on the action with the specified name
:param pin_name:
:return: Pin with specified name
"""
pin_set = {x for x in self.outputs if x.name == pin_name}
if len(pin_set) == 0:
raise ValueError(f'Could not find output pin named {pin_name}')
if len(pin_set) > 1:
raise ValueError(f'Found more than one output pin named {pin_name}')
return pin_set.pop()
CallBehaviorAction.output_pin = call_behavior_action_output_pin # Add to class via monkey patch
def call_behavior_action_pin_parameter(self, pin_name: str):
"""Find the behavior parameter corresponding to the pin
:param pin_name:
:return: Parameter with specified name
"""
try:
pin = self.input_pin(pin_name)
except:
try:
pin = self.output_pin(pin_name)
except:
raise ValueError(f'Could not find pin named {pin_name}')
behavior = self.behavior.lookup()
[parameter] = [p for p in behavior.parameters if p.property_value.name == pin_name]
return parameter
CallBehaviorAction.pin_parameter = call_behavior_action_pin_parameter # Add to class via monkey patch
def add_call_behavior_action(parent: Activity, behavior: Behavior, **input_pin_literals):
"""Create a call to a Behavior and add it to an Activity
:param parent: Activity to which the behavior is being added
:param behavior: Behavior to be called
:param input_pin_literals: map of literal values to be assigned to specific pins
:return: newly constructed
"""
# first, make sure that all of the keyword arguments are in the inputs of the behavior
unmatched_keys = [key for key in input_pin_literals.keys() if key not in (i.property_value.name for i in behavior.get_inputs())]
if unmatched_keys:
raise ValueError(f'Specification for "{behavior.display_id}" does not have inputs: {unmatched_keys}')
# create action
action = CallBehaviorAction(behavior=behavior)
parent.nodes.append(action)
# Instantiate input pins
for i in id_sort(behavior.get_inputs()):
if i.property_value.name in input_pin_literals:
value = input_pin_literals[i.property_value.name]
# TODO: type check relationship between value and parameter type specification
action.inputs.append(ValuePin(name=i.property_value.name, is_ordered=i.property_value.is_ordered,
is_unique=i.property_value.is_unique, value=literal(value)))
else: # if not a constant, then just a generic InputPin
action.inputs.append(InputPin(name=i.property_value.name, is_ordered=i.property_value.is_ordered,
is_unique=i.property_value.is_unique))
# Instantiate output pins
for o in id_sort(behavior.get_outputs()):
action.outputs.append(OutputPin(name=o.property_value.name, is_ordered=o.property_value.is_ordered,
is_unique=o.property_value.is_unique))
return action
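# Usage sketch (activity, behavior, and pin name are illustrative):
#   action = add_call_behavior_action(my_activity, my_behavior, samples=3)
#   my_activity.order(my_activity.initial(), action)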
###########################################
# Define extension methods for Activity
def activity_initial(self):
"""Find or create an initial node in an Activity.
Note that while UML allows multiple initial nodes, use of this routine assumes a single one is sufficient.
:return: InitialNode for Activity
"""
initial = [a for a in self.nodes if isinstance(a, InitialNode)]
if not initial:
self.nodes.append(InitialNode())
return self.initial()
elif len(initial) == 1:
return initial[0]
else:
raise ValueError(f'Activity "{self.display_id}" assumed to have one initial node, but found {len(initial)}')
Activity.initial = activity_initial # Add to class via monkey patch
def activity_final(self):
"""Find or create a final node in a Activity
Note that while UML allows multiple final nodes, use of this routine assumes a single is sufficient.
:return: FinalNode for Activity
"""
final = [a for a in self.nodes if isinstance(a, FinalNode)]
if not final:
self.nodes.append(FlowFinalNode())
return self.final()
elif len(final) == 1:
return final[0]
else:
raise ValueError(f'Activity "{self.display_id}" assumed to have one initial node, but found {len(initial)}')
Activity.final = activity_final # Add to class via monkey patch
def activity_input_value(self, name: str, param_type: str, optional: bool = False,
default_value: ValueSpecification = None) -> ActivityParameterNode:
"""Add an input, then return the ActivityParameterNode that refers to that input
:param self: Activity
:param name: Name of the input
:param param_type: type of value expected for the input
:param optional: True if the Parameter is optional; default is False
:param default_value: if the input is optional, a default value must be set
:return: ActivityParameterNode associated with the input
"""
parameter = self.add_input(name=name, param_type=param_type, optional=optional, default_value=default_value)
node = ActivityParameterNode(parameter=parameter)
self.nodes.append(node)
return node
Activity.input_value = activity_input_value # Add to class via monkey patch
def activity_designate_output(self, name: str, param_type: str, source: ActivityNode) -> ActivityParameterNode:
"""Add an output, connect it to an ActivityParameterNode, and get its value from the designated node
:param self: Activity
:param name: Name of the output
:param param_type: type of value expected for the output
:param source: ActivityNode whose ObjectValue output should be routed to the source
:return: ActivityParameterNode associated with the output
"""
parameter = self.add_output(name=name, param_type=param_type)
node = ActivityParameterNode(parameter=parameter)
self.nodes.append(node)
self.use_value(source, node)
return node
Activity.designate_output = activity_designate_output # Add to class via monkey patch
def activity_initiating_nodes(self) -> List[ActivityNode]:
"""Find all InitialNode and ActivityParameterNode activities.
These should be the only activities with no in-flow, which can thus initiate execution.
Parameters
----------
self: Activity
Returns
-------
List of ActivityNodes able to initiate execution
"""
return [n for n in self.nodes if isinstance(n, InitialNode) or
(isinstance(n, ActivityParameterNode) and n.parameter and n.parameter.lookup().property_value.direction == PARAMETER_IN)]
Activity.initiating_nodes = activity_initiating_nodes # Add to class via monkey patch
def activity_incoming_edges(self, node: ActivityNode) -> Set[ActivityEdge]:
"""Find the edges that have the designated node as a target
Parameters
----------
node: target for edges
Returns
-------
Set of ActivityEdges with node as a target
"""
return {e for e in self.edges if e.target == node.identity} # TODO: change to pointer lookup after pySBOL #237
Activity.incoming_edges = activity_incoming_edges # Add to class via monkey patch
def activity_outgoing_edges(self, node: ActivityNode) -> Set[ActivityEdge]:
"""Find the edges that have the designated node as a source
Parameters
----------
node: target for edges
Returns
-------
Set of ActivityEdges with node as a source
"""
return {e for e in self.edges if e.source == node.identity} # TODO: change to pointer lookup after pySBOL #237
Activity.outgoing_edges = activity_outgoing_edges # Add to class via monkey patch
def activity_deconflict_objectflow_sources(self, source: ActivityNode) -> ActivityNode:
'''Avoid nondeterminism in ObjectFlows by injecting ForkNode objects where necessary
Parameters
----------
self: Activity
source: node to take a value from, directly or indirectly
Returns
-------
A source to attach to; either the original or an intervening ForkNode
'''
# Use original if it's one of the node types that supports multiple dispatch
if isinstance(source, ForkNode) or isinstance(source, DecisionNode):
return source
# Otherwise, find out what targets currently attach:
current_outflows = [e for e in self.edges if e.source.lookup() is source]
# Use original if nothing is attached to it
if len(current_outflows) == 0:
#print(f'No prior use of {source.identity}, connecting directly')
return source
# If the flow goes to a single ForkNode, connect to that ForkNode
elif len(current_outflows) == 1 and isinstance(current_outflows[0].target.lookup(),ForkNode):
#print(f'Found an existing fork from {source.identity}, reusing')
return current_outflows[0].target.lookup()
# Otherwise, inject a ForkNode and connect all current flows to that instead
else:
#print(f'Found no existing fork from {source.identity}, injecting one')
fork = ForkNode()
self.nodes.append(fork)
self.edges.append(ObjectFlow(source=source, target=fork))
for f in current_outflows:
f.source = fork # change over the existing flows
return fork
Activity.deconflict_objectflow_sources = activity_deconflict_objectflow_sources
def activity_call_behavior(self, behavior: Behavior, **input_pin_map):
"""Call a Behavior as an Action in an Activity
:param behavior: Activity to be invoked (object or name)
:param input_pin_map: literal value or ActivityNode mapped to names of Behavior parameters
:return: CallBehaviorAction that invokes the Behavior
"""
# Any ActivityNode in the pin map will be withheld for connecting via object flows instead
activity_inputs = {k: v for k, v in input_pin_map.items() if isinstance(v, ActivityNode)}
non_activity_inputs = {k: v for k, v in input_pin_map.items() if k not in activity_inputs}
cba = add_call_behavior_action(self, behavior, **non_activity_inputs)
# add flows for activities being connected implicitly
for name, source in id_sort(activity_inputs.items()):
self.use_value(source, cba.input_pin(name))
return cba
Activity.call_behavior = activity_call_behavior # Add to class via monkey patch
def activity_order(self, source: ActivityNode, target: ActivityNode):
"""Add a ControlFlow between the designated source and target nodes in an Activity
:param source: ActivityNode that is the source of the control flow
:param target: ActivityNode that is the target of the control flow
:return: ControlFlow created between source and target
"""
if source not in self.nodes:
raise ValueError(f'Source node {source.identity} is not a member of activity {self.identity}')
if target not in self.nodes:
raise ValueError(f'Target node {target.identity} is not a member of activity {self.identity}')
flow = ControlFlow(source=source, target=target)
self.edges.append(flow)
return flow
Activity.order = activity_order # Add to class via monkey patch
def activity_use_value(self, source: ActivityNode, target: ActivityNode) -> ObjectFlow:
"""Add an ObjectFlow transferring a value between the designated source and target nodes in an Activity
Typically, these activities will be either Action Pins or ActivityParameterNodes serving as input or output
:param source: ActivityNode that is the source of the value
:param target: ActivityNode that receives the value
:return: ObjectFlow created between source and target
"""
if source.get_toplevel() is not self: # check via toplevel, because pins are not directly in the node list
raise ValueError(f'Source node {source.identity} is not a member of activity {self.identity}')
if target.get_toplevel() is not self:
raise ValueError(f'Target node {target.identity} is not a member of activity {self.identity}')
source = self.deconflict_objectflow_sources(source)
flow = ObjectFlow(source=source, target=target)
self.edges.append(flow)
return flow
Activity.use_value = activity_use_value # Add to class via monkey patch
def activity_validate(self, report: sbol3.ValidationReport = None) -> sbol3.ValidationReport:
'''Checks to see if the activity has any undesirable non-deterministic edges
Parameters
----------
self
report
Returns
-------
'''
report = super(Activity, self).validate(report)
# Check for objects with multiple outgoing ObjectFlow edges that are not of type ForkNode or DecisionNode
source_counts = Counter([e.source.lookup() for e in self.edges if isinstance(e,ObjectFlow)])
multi_targets = {n: c for n, c in source_counts.items() if c>1 and not (isinstance(n,ForkNode) or isinstance(n,DecisionNode))}
for n, c in multi_targets.items():
report.addWarning(n.identity, None, f'ActivityNode has {c} outgoing edges: multi-edges can cause nondeterministic flow')
# Check that incoming flow counts obey constraints:
target_counts = Counter([e.target.lookup().unpin() for e in self.edges
if isinstance(e.target.lookup(), ActivityNode) ])
# No InitialNode should have an incoming flow (though an ActivityParameterNode may)
initial_with_inflow = {n: c for n, c in target_counts.items() if isinstance(n,InitialNode)}
for n, c in initial_with_inflow.items():
report.addError(n.identity, None, f'InitialNode must have no incoming edges, but has {c}')
# No node besides initiating nodes (InitialNode or ActivityParameterNode) should have no incoming flows
missing_inflow = set(self.nodes) - {n for n, c in target_counts.items()} - set(self.initiating_nodes())
for n in missing_inflow:
report.addWarning(n.identity, None, f'Node has no incoming edges, so cannot be executed')
return report
Activity.validate = activity_validate
# TODO: add a check for loops that can obtain too many or too few values
| 41.672566
| 133
| 0.707199
|
import os
import posixpath
from collections import Counter
from typing import List, Set, Iterable
from sbol_factory import SBOLFactory, UMLFactory
import sbol3
SBOLFactory('uml_submodule',
posixpath.join(os.path.dirname(os.path.realpath(__file__)),
'uml.ttl'),
'http://bioprotocols.org/uml#')
from uml_submodule import *
from .uml_graphviz import *
def id_sort(i: iter):
    sortable = list(i)
    sortable.sort(key=lambda x: x.identity if isinstance(x, sbol3.Identified) else x)
    return sortable
PARAMETER_IN = 'http://bioprotocols.org/uml#in'
PARAMETER_OUT = 'http://bioprotocols.org/uml#out'
def literal(value, reference: bool = False) -> LiteralSpecification:
    if isinstance(value, LiteralSpecification):
        return literal(value.value, reference)
    elif value is None:
        return LiteralNull()
    elif isinstance(value, str):
        return LiteralString(value=value)
    elif isinstance(value, bool):
        return LiteralBoolean(value=value)
    elif isinstance(value, int):
        return LiteralInteger(value=value)
    elif isinstance(value, float):
        return LiteralReal(value=value)
    elif isinstance(value, sbol3.TopLevel) or (reference and isinstance(value, sbol3.Identified)):
        return LiteralReference(value=value)
    elif isinstance(value, sbol3.Identified):
        return LiteralIdentified(value=value)
    else:
        raise ValueError(f'Don\'t know how to make literal from {type(value)} "{value}"')
def behavior_add_parameter(self, name: str, param_type: str, direction: str, optional: bool = False,
                           default_value: ValueSpecification = None) -> OrderedPropertyValue:
    param = Parameter(name=name, type=param_type, direction=direction, is_ordered=True, is_unique=True)
    ordered_param = OrderedPropertyValue(index=len(self.parameters), property_value=param)
    print(param, ordered_param)
    print(ordered_param.__class__.__mro__)
    self.parameters.append(ordered_param)
    param.upper_value = literal(1)
    if optional:
        param.lower_value = literal(0)
    else:
        param.lower_value = literal(1)
    if default_value:
        param.default_value = default_value
    return ordered_param
Behavior.add_parameter = behavior_add_parameter
def behavior_add_input(self, name: str, param_type: str, optional: bool = False,
                       default_value: ValueSpecification = None) -> OrderedPropertyValue:
return self.add_parameter(name, param_type, PARAMETER_IN, optional, default_value)
Behavior.add_input = behavior_add_input
def behavior_add_output(self, name, param_type) -> OrderedPropertyValue:
return self.add_parameter(name, param_type, PARAMETER_OUT)
Behavior.add_output = behavior_add_output
def behavior_get_inputs(self) -> Iterable[Parameter]:
return (p for p in self.parameters if p.property_value.direction == PARAMETER_IN)
Behavior.get_inputs = behavior_get_inputs
def behavior_get_input(self, name) -> Parameter:
    found = [p for p in self.get_inputs() if p.property_value.name == name]
if len(found) == 0:
raise ValueError(f'Behavior {self.identity} has no input parameter named {name}')
elif len(found) > 1:
raise ValueError(f'Behavior {self.identity} has multiple input parameters named {name}')
else:
return found[0]
Behavior.get_input = behavior_get_input
def behavior_get_required_inputs(self):
return (p for p in self.get_inputs() if p.property_value.lower_value.value > 0)
Behavior.get_required_inputs = behavior_get_required_inputs
def behavior_get_outputs(self):
return (p for p in self.parameters if p.property_value.direction == PARAMETER_OUT)
Behavior.get_outputs = behavior_get_outputs
def behavior_get_output(self, name) -> Parameter:
    found = [p for p in self.get_outputs() if p.property_value.name == name]
if len(found) == 0:
raise ValueError(f'Behavior {self.identity} has no output parameter named {name}')
elif len(found) > 1:
raise ValueError(f'Behavior {self.identity} has multiple output parameters named {name}')
else:
return found[0]
Behavior.get_output = behavior_get_output
def behavior_get_required_outputs(self):
return (p for p in self.get_outputs() if p.property_value.lower_value.value > 0)
Behavior.get_required_outputs = behavior_get_required_outputs
def add_call_behavior_action(parent, behavior: Behavior, **input_pin_literals) -> CallBehaviorAction:
    unmatched_keys = [key for key in input_pin_literals.keys() if key not in (i.property_value.name for i in behavior.get_inputs())]
if unmatched_keys:
raise ValueError(f'Specification for "{behavior.display_id}" does not have inputs: {unmatched_keys}')
action = CallBehaviorAction(behavior=behavior)
parent.nodes.append(action)
for i in id_sort(behavior.get_inputs()):
if i.property_value.name in input_pin_literals:
value = input_pin_literals[i.property_value.name]
action.inputs.append(ValuePin(name=i.property_value.name, is_ordered=i.property_value.is_ordered,
is_unique=i.property_value.is_unique, value=literal(value)))
else:
action.inputs.append(InputPin(name=i.property_value.name, is_ordered=i.property_value.is_ordered,
is_unique=i.property_value.is_unique))
for o in id_sort(behavior.get_outputs()):
action.outputs.append(OutputPin(name=o.property_value.name, is_ordered=o.property_value.is_ordered,
is_unique=o.property_value.is_unique))
return action
def activity_input_value(self, name: str, param_type: str, optional: bool = False,
                         default_value: ValueSpecification = None) -> ActivityParameterNode:
parameter = self.add_input(name=name, param_type=param_type, optional=optional, default_value=default_value)
node = ActivityParameterNode(parameter=parameter)
self.nodes.append(node)
return node
Activity.input_value = activity_input_value
def activity_designate_output(self, name: str, param_type: str, source: ActivityNode) -> ActivityParameterNode:
parameter = self.add_output(name=name, param_type=param_type)
node = ActivityParameterNode(parameter=parameter)
self.nodes.append(node)
self.use_value(source, node)
return node
Activity.designate_output = activity_designate_output
def activity_initiating_nodes(self) -> List[ActivityNode]:
return [n for n in self.nodes if isinstance(n, InitialNode) or
(isinstance(n, ActivityParameterNode) and n.parameter and n.parameter.lookup().property_value.direction == PARAMETER_IN)]
Activity.initiating_nodes = activity_initiating_nodes
def activity_incoming_edges(self, node: ActivityNode) -> Set[ActivityEdge]:
    return {e for e in self.edges if e.target == node.identity}
Activity.incoming_edges = activity_incoming_edges
def activity_outgoing_edges(self, node: ActivityNode) -> Set[ActivityEdge]:
    return {e for e in self.edges if e.source == node.identity}
Activity.outgoing_edges = activity_outgoing_edges
def activity_deconflict_objectflow_sources(self, source: ActivityNode) -> ActivityNode:
if isinstance(source, ForkNode) or isinstance(source, DecisionNode):
return source
# Otherwise, find out what targets currently attach:
current_outflows = [e for e in self.edges if e.source.lookup() is source]
# Use original if nothing is attached to it
if len(current_outflows) == 0:
#print(f'No prior use of {source.identity}, connecting directly')
return source
# If the flow goes to a single ForkNode, connect to that ForkNode
elif len(current_outflows) == 1 and isinstance(current_outflows[0].target.lookup(),ForkNode):
#print(f'Found an existing fork from {source.identity}, reusing')
return current_outflows[0].target.lookup()
# Otherwise, inject a ForkNode and connect all current flows to that instead
else:
#print(f'Found no existing fork from {source.identity}, injecting one')
fork = ForkNode()
self.nodes.append(fork)
self.edges.append(ObjectFlow(source=source, target=fork))
for f in current_outflows:
f.source = fork # change over the existing flows
return fork
Activity.deconflict_objectflow_sources = activity_deconflict_objectflow_sources
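# Illustrative sketch (not part of the original module): the deconflict logic
# above means a value source reused twice is rerouted through an injected
# ForkNode rather than carrying two direct ObjectFlows. The names below are
# hypothetical:
#   src = protocol.input_value('sample', 'https://example.org/Sample')
#   protocol.use_value(src, pin_a)   # first use: direct edge src -> pin_a
#   protocol.use_value(src, pin_b)   # second use: src -> ForkNode -> {pin_a, pin_b}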
def activity_call_behavior(self, behavior: Behavior, **input_pin_map):
# Any ActivityNode in the pin map will be withheld for connecting via object flows instead
activity_inputs = {k: v for k, v in input_pin_map.items() if isinstance(v, ActivityNode)}
non_activity_inputs = {k: v for k, v in input_pin_map.items() if k not in activity_inputs}
cba = add_call_behavior_action(self, behavior, **non_activity_inputs)
# add flows for activities being connected implicitly
for name, source in id_sort(activity_inputs.items()):
self.use_value(source, cba.input_pin(name))
return cba
Activity.call_behavior = activity_call_behavior # Add to class via monkey patch
def activity_order(self, source: ActivityNode, target: ActivityNode):
if source not in self.nodes:
raise ValueError(f'Source node {source.identity} is not a member of activity {self.identity}')
if target not in self.nodes:
raise ValueError(f'Target node {target.identity} is not a member of activity {self.identity}')
flow = ControlFlow(source=source, target=target)
self.edges.append(flow)
return flow
Activity.order = activity_order # Add to class via monkey patch
def activity_use_value(self, source: ActivityNode, target: ActivityNode) -> ObjectFlow:
if source.get_toplevel() is not self: # check via toplevel, because pins are not directly in the node list
raise ValueError(f'Source node {source.identity} is not a member of activity {self.identity}')
if target.get_toplevel() is not self:
raise ValueError(f'Target node {target.identity} is not a member of activity {self.identity}')
source = self.deconflict_objectflow_sources(source)
flow = ObjectFlow(source=source, target=target)
self.edges.append(flow)
return flow
Activity.use_value = activity_use_value # Add to class via monkey patch
def activity_validate(self, report: sbol3.ValidationReport = None) -> sbol3.ValidationReport:
report = super(Activity, self).validate(report)
# Check for objects with multiple outgoing ObjectFlow edges that are not of type ForkNode or DecisionNode
source_counts = Counter([e.source.lookup() for e in self.edges if isinstance(e,ObjectFlow)])
multi_targets = {n: c for n, c in source_counts.items() if c>1 and not (isinstance(n,ForkNode) or isinstance(n,DecisionNode))}
for n, c in multi_targets.items():
report.addWarning(n.identity, None, f'ActivityNode has {c} outgoing edges: multi-edges can cause nondeterministic flow')
# Check that incoming flow counts obey constraints:
target_counts = Counter([e.target.lookup().unpin() for e in self.edges
if isinstance(e.target.lookup(), ActivityNode) ])
# No InitialNode should have an incoming flow (though an ActivityParameterNode may)
initial_with_inflow = {n: c for n, c in target_counts.items() if isinstance(n,InitialNode)}
for n, c in initial_with_inflow.items():
report.addError(n.identity, None, f'InitialNode must have no incoming edges, but has {c}')
# No node besides initiating nodes (InitialNode or ActivityParameterNode) should have no incoming flows
missing_inflow = set(self.nodes) - {n for n, c in target_counts.items()} - set(self.initiating_nodes())
for n in missing_inflow:
report.addWarning(n.identity, None, f'Node has no incoming edges, so cannot be executed')
return report
Activity.validate = activity_validate
# TODO: add a check for loops that can obtain too many or too few values
| true
| true
|
1c41404389143339eb4f57dd0181b52f80ff8ebc
| 3,571
|
py
|
Python
|
cassiopeia/dto/championmasteryapi.py
|
BubblegumDiscord/LoLTrivia
|
70070dc2e8c59d7210d8b30ad2525bafaee4bba7
|
[
"MIT"
] | null | null | null |
cassiopeia/dto/championmasteryapi.py
|
BubblegumDiscord/LoLTrivia
|
70070dc2e8c59d7210d8b30ad2525bafaee4bba7
|
[
"MIT"
] | null | null | null |
cassiopeia/dto/championmasteryapi.py
|
BubblegumDiscord/LoLTrivia
|
70070dc2e8c59d7210d8b30ad2525bafaee4bba7
|
[
"MIT"
] | 1
|
2018-05-06T15:49:33.000Z
|
2018-05-06T15:49:33.000Z
|
import cassiopeia.dto.requests
import cassiopeia.type.core.common
import cassiopeia.type.dto.championmastery
def get_champion_mastery(summoner_id, champion_id):
"""
https://developer.riotgames.com/api/methods#!/1034/3545
Args:
summoner_id (int): the summoner ID to get champion mastery for
champion_id (int): the champion ID for the desired champion
Returns:
        ChampionMastery: the summoner's champion mastery value for the specified champion
"""
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
# Get JSON response
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/champion/{championId}".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id, championId=champion_id)
return cassiopeia.type.dto.championmastery.ChampionMastery(cassiopeia.dto.requests.get(request, include_base=False))
def get_champion_masteries(summoner_id):
"""
https://developer.riotgames.com/api/methods#!/1034/3544
Args:
summoner_id (int): the summoner ID to get champion masteries for
Returns:
list<ChampionMastery>: the summoner's champion masteries
"""
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
# Get JSON response
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/champions".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id)
response = cassiopeia.dto.requests.get(request, include_base=False)
# Convert response to Dto type
return [cassiopeia.type.dto.championmastery.ChampionMastery(cm) for cm in response]
def get_champion_mastery_score(summoner_id):
"""
https://developer.riotgames.com/api/methods#!/1034/3546
Args:
summoner_id (int): the summoner ID to get champion masteries for
Returns:
int: the summoner's total champion mastery score
"""
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
# Get JSON response
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/score".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id)
return cassiopeia.dto.requests.get(request, include_base=False)
def get_top_champion_masteries(summoner_id, count=3):
"""
https://developer.riotgames.com/api/methods#!/1034/3540
Args:
summoner_id (int): the summoner ID to get champion masteries for
        count (int): the maximum number of entries to retrieve (default 3)
Returns:
list<ChampionMastery>: the summoner's top champion masteries
"""
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
# Get JSON response
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/topchampions".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id)
response = cassiopeia.dto.requests.get(request, {"count": count}, include_base=False)
# Convert response to Dto type
return [cassiopeia.type.dto.championmastery.ChampionMastery(cm) for cm in response]
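# Usage sketch (hypothetical IDs and key; assumes cassiopeia.dto.requests
# exposes module-level `api_key` and `region` settings, as elsewhere in this
# library version):
#   import cassiopeia.dto.requests
#   cassiopeia.dto.requests.api_key = "<riot-api-key>"
#   cassiopeia.dto.requests.region = "na"
#   top3 = get_top_champion_masteries(12345678)    # summoner ID is a placeholder
#   score = get_champion_mastery_score(12345678)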
| 42.511905
| 241
| 0.745449
|
import cassiopeia.dto.requests
import cassiopeia.type.core.common
import cassiopeia.type.dto.championmastery
def get_champion_mastery(summoner_id, champion_id):
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/champion/{championId}".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id, championId=champion_id)
return cassiopeia.type.dto.championmastery.ChampionMastery(cassiopeia.dto.requests.get(request, include_base=False))
def get_champion_masteries(summoner_id):
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/champions".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id)
response = cassiopeia.dto.requests.get(request, include_base=False)
return [cassiopeia.type.dto.championmastery.ChampionMastery(cm) for cm in response]
def get_champion_mastery_score(summoner_id):
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/score".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id)
return cassiopeia.dto.requests.get(request, include_base=False)
def get_top_champion_masteries(summoner_id, count=3):
region = cassiopeia.type.core.common.Region(cassiopeia.dto.requests.region)
platform = cassiopeia.type.core.common.Platform[region.name]
request = "https://{server}.api.pvp.net/championmastery/location/{platform}/player/{summonerId}/topchampions".format(server=cassiopeia.dto.requests.region, platform=platform.value, summonerId=summoner_id)
response = cassiopeia.dto.requests.get(request, {"count": count}, include_base=False)
return [cassiopeia.type.dto.championmastery.ChampionMastery(cm) for cm in response]
| true
| true
|
1c41409be5507ec7c63c1033a60216618fd2e6c2
| 1,327
|
py
|
Python
|
skychain/endpoints/node_endpoint.py
|
OneTesseractInMultiverse/SkyTrack
|
f39c9296f7dfb559dcc33af1776ea2dbbab94e01
|
[
"MIT"
] | null | null | null |
skychain/endpoints/node_endpoint.py
|
OneTesseractInMultiverse/SkyTrack
|
f39c9296f7dfb559dcc33af1776ea2dbbab94e01
|
[
"MIT"
] | 1
|
2017-12-06T02:20:51.000Z
|
2017-12-06T02:20:51.000Z
|
skychain/endpoints/node_endpoint.py
|
OneTesseractInMultiverse/SkyTrack
|
f39c9296f7dfb559dcc33af1776ea2dbbab94e01
|
[
"MIT"
] | null | null | null |
from skychain import sky_app, blockchain, node_id
from flask import jsonify, request
# --------------------------------------------------------------------------
# POST: /API/V1/NODE/
# --------------------------------------------------------------------------
@sky_app.route('/api/v1/node', methods=['POST'])
def post_node():
values = request.get_json()
nodes = values.get('nodes')
if nodes is None:
return jsonify({"msg", "Please supply a valid list of nodes"}), 400
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchain.nodes),
}
return jsonify(response), 201
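# Client-side sketch (assumes the app is served at http://localhost:5000;
# `requests` here is the third-party HTTP library, not Flask's request proxy):
#   import requests
#   r = requests.post('http://localhost:5000/api/v1/node',
#                     json={'nodes': ['http://192.168.0.5:5000']})
#   print(r.status_code, r.json())   # expect 201 and the updated node list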
# --------------------------------------------------------------------------
# GET: /API/V1/NODE/CONFLICT_STATUS
# --------------------------------------------------------------------------
@sky_app.route('/api/v1/node/conflict_status', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
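# Client-side sketch (same assumptions as above): trigger conflict resolution
# and inspect which chain won.
#   r = requests.get('http://localhost:5000/api/v1/node/conflict_status')
#   print(r.json()['message'])   # 'Our chain was replaced' or '... is authoritative'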
| 34.025641
| 76
| 0.477016
|
from skychain import sky_app, blockchain, node_id
from flask import jsonify, request
@sky_app.route('/api/v1/node', methods=['POST'])
def post_node():
values = request.get_json()
nodes = values.get('nodes')
if nodes is None:
return jsonify({"msg", "Please supply a valid list of nodes"}), 400
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchain.nodes),
}
return jsonify(response), 201
@sky_app.route('/api/v1/node/conflict_status', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
| true
| true
|
1c4140b67994c9da77953fe26af386ef913aca28
| 23,927
|
py
|
Python
|
ucsmsdk/mometa/sw/SwEthLanBorder.py
|
Curlyfingers/ucsmsdk
|
982ff2d8faa12ffb88e1f8cba98cf5749f05c93d
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/sw/SwEthLanBorder.py
|
Curlyfingers/ucsmsdk
|
982ff2d8faa12ffb88e1f8cba98cf5749f05c93d
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/sw/SwEthLanBorder.py
|
Curlyfingers/ucsmsdk
|
982ff2d8faa12ffb88e1f8cba98cf5749f05c93d
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for SwEthLanBorder ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwEthLanBorderConsts:
FSM_PREV_DEPLOY_BEGIN = "DeployBegin"
FSM_PREV_DEPLOY_FAIL = "DeployFail"
FSM_PREV_DEPLOY_SUCCESS = "DeploySuccess"
FSM_PREV_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
FSM_PREV_DEPLOY_UPDATE_VLAN_GROUPS = "DeployUpdateVlanGroups"
FSM_PREV_DEPLOY_WAIT_ON_SW_CONFIG_HOST = "DeployWaitOnSwConfigHost"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
FSM_RMT_INV_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
FSM_RMT_INV_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
FSM_RMT_INV_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_DEPLOY_BEGIN = "DeployBegin"
FSM_STATUS_DEPLOY_FAIL = "DeployFail"
FSM_STATUS_DEPLOY_SUCCESS = "DeploySuccess"
FSM_STATUS_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
FSM_STATUS_DEPLOY_UPDATE_VLAN_GROUPS = "DeployUpdateVlanGroups"
FSM_STATUS_DEPLOY_WAIT_ON_SW_CONFIG_HOST = "DeployWaitOnSwConfigHost"
FSM_STATUS_NOP = "nop"
NUM_VLAN_LIMIT_EXCEEDED = "exceeded"
NUM_VLAN_LIMIT_OK = "ok"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
UDLD_RECOVERY_ACTION_NONE = "none"
UDLD_RECOVERY_ACTION_RESET = "reset"
class SwEthLanBorder(ManagedObject):
"""This is SwEthLanBorder class."""
consts = SwEthLanBorderConsts()
naming_props = set([])
mo_meta = MoMeta("SwEthLanBorder", "swEthLanBorder", "border-eth", VersionMeta.Version101e, "InputOutput", 0x7f, [], ["read-only"], [u'networkElement'], [u'eventInst', u'faultInst', u'swEthEstcEp', u'swEthEstcPc', u'swEthLanBorderFsm', u'swEthLanBorderFsmTask', u'swEthLanEp', u'swEthLanPc', u'swSubGroup', u'swVlan', u'swVlanGroup'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"deploy_flag": MoPropertyMeta("deploy_flag", "deployFlag", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_flags": MoPropertyMeta("fsm_flags", "fsmFlags", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-4294967295"]),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "DeployUpdateVlanGroups", "DeployWaitOnSwConfigHost", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", 
"ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "DeployUpdateVlanGroups", "DeployWaitOnSwConfigHost", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"last_vlan_grp_compute_time": MoPropertyMeta("last_vlan_grp_compute_time", "lastVlanGrpComputeTime", "string", VersionMeta.Version226c, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.CREATE_ONLY, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"num_vlan_limit": MoPropertyMeta("num_vlan_limit", "numVlanLimit", "string", VersionMeta.Version227b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["exceeded", "ok"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["A", "B", "NONE"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
"udld_msg_interval": MoPropertyMeta("udld_msg_interval", "udldMsgInterval", "ushort", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["7-90"]),
"udld_recovery_action": MoPropertyMeta("udld_recovery_action", "udldRecoveryAction", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["none", "reset"], []),
}
prop_map = {
"childAction": "child_action",
"deployFlag": "deploy_flag",
"dn": "dn",
"fsmDescr": "fsm_descr",
"fsmFlags": "fsm_flags",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"lastVlanGrpComputeTime": "last_vlan_grp_compute_time",
"locale": "locale",
"name": "name",
"numVlanLimit": "num_vlan_limit",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"type": "type",
"udldMsgInterval": "udld_msg_interval",
"udldRecoveryAction": "udld_recovery_action",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.deploy_flag = None
self.fsm_descr = None
self.fsm_flags = None
self.fsm_prev = None
self.fsm_progr = None
self.fsm_rmt_inv_err_code = None
self.fsm_rmt_inv_err_descr = None
self.fsm_rmt_inv_rslt = None
self.fsm_stage_descr = None
self.fsm_stamp = None
self.fsm_status = None
self.fsm_try = None
self.last_vlan_grp_compute_time = None
self.locale = None
self.name = None
self.num_vlan_limit = None
self.sacl = None
self.status = None
self.switch_id = None
self.transport = None
self.type = None
self.udld_msg_interval = None
self.udld_recovery_action = None
ManagedObject.__init__(self, "SwEthLanBorder", parent_mo_or_dn, **kwargs)
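# Query sketch (not part of the generated module): retrieving these MOs from a
# live UCS Manager. UcsHandle and query_classid are standard ucsmsdk entry
# points; the address and credentials below are placeholders.
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("192.0.2.10", "admin", "password")
#   handle.login()
#   for border in handle.query_classid("swEthLanBorder"):
#       print(border.dn, border.switch_id, border.num_vlan_limit)
#   handle.logout()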
| 92.382239
| 3,748
| 0.757596
|
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwEthLanBorderConsts:
FSM_PREV_DEPLOY_BEGIN = "DeployBegin"
FSM_PREV_DEPLOY_FAIL = "DeployFail"
FSM_PREV_DEPLOY_SUCCESS = "DeploySuccess"
FSM_PREV_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
FSM_PREV_DEPLOY_UPDATE_VLAN_GROUPS = "DeployUpdateVlanGroups"
FSM_PREV_DEPLOY_WAIT_ON_SW_CONFIG_HOST = "DeployWaitOnSwConfigHost"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
FSM_RMT_INV_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
FSM_RMT_INV_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
FSM_RMT_INV_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_DEPLOY_BEGIN = "DeployBegin"
FSM_STATUS_DEPLOY_FAIL = "DeployFail"
FSM_STATUS_DEPLOY_SUCCESS = "DeploySuccess"
FSM_STATUS_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
FSM_STATUS_DEPLOY_UPDATE_VLAN_GROUPS = "DeployUpdateVlanGroups"
FSM_STATUS_DEPLOY_WAIT_ON_SW_CONFIG_HOST = "DeployWaitOnSwConfigHost"
FSM_STATUS_NOP = "nop"
NUM_VLAN_LIMIT_EXCEEDED = "exceeded"
NUM_VLAN_LIMIT_OK = "ok"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
UDLD_RECOVERY_ACTION_NONE = "none"
UDLD_RECOVERY_ACTION_RESET = "reset"
class SwEthLanBorder(ManagedObject):
consts = SwEthLanBorderConsts()
naming_props = set([])
mo_meta = MoMeta("SwEthLanBorder", "swEthLanBorder", "border-eth", VersionMeta.Version101e, "InputOutput", 0x7f, [], ["read-only"], [u'networkElement'], [u'eventInst', u'faultInst', u'swEthEstcEp', u'swEthEstcPc', u'swEthLanBorderFsm', u'swEthLanBorderFsmTask', u'swEthLanEp', u'swEthLanPc', u'swSubGroup', u'swVlan', u'swVlanGroup'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"deploy_flag": MoPropertyMeta("deploy_flag", "deployFlag", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_flags": MoPropertyMeta("fsm_flags", "fsmFlags", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-4294967295"]),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "DeployUpdateVlanGroups", "DeployWaitOnSwConfigHost", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", 
"ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "DeployUpdateVlanGroups", "DeployWaitOnSwConfigHost", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"last_vlan_grp_compute_time": MoPropertyMeta("last_vlan_grp_compute_time", "lastVlanGrpComputeTime", "string", VersionMeta.Version226c, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.CREATE_ONLY, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"num_vlan_limit": MoPropertyMeta("num_vlan_limit", "numVlanLimit", "string", VersionMeta.Version227b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["exceeded", "ok"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["A", "B", "NONE"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
"udld_msg_interval": MoPropertyMeta("udld_msg_interval", "udldMsgInterval", "ushort", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["7-90"]),
"udld_recovery_action": MoPropertyMeta("udld_recovery_action", "udldRecoveryAction", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["none", "reset"], []),
}
prop_map = {
"childAction": "child_action",
"deployFlag": "deploy_flag",
"dn": "dn",
"fsmDescr": "fsm_descr",
"fsmFlags": "fsm_flags",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"lastVlanGrpComputeTime": "last_vlan_grp_compute_time",
"locale": "locale",
"name": "name",
"numVlanLimit": "num_vlan_limit",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"type": "type",
"udldMsgInterval": "udld_msg_interval",
"udldRecoveryAction": "udld_recovery_action",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.deploy_flag = None
self.fsm_descr = None
self.fsm_flags = None
self.fsm_prev = None
self.fsm_progr = None
self.fsm_rmt_inv_err_code = None
self.fsm_rmt_inv_err_descr = None
self.fsm_rmt_inv_rslt = None
self.fsm_stage_descr = None
self.fsm_stamp = None
self.fsm_status = None
self.fsm_try = None
self.last_vlan_grp_compute_time = None
self.locale = None
self.name = None
self.num_vlan_limit = None
self.sacl = None
self.status = None
self.switch_id = None
self.transport = None
self.type = None
self.udld_msg_interval = None
self.udld_recovery_action = None
ManagedObject.__init__(self, "SwEthLanBorder", parent_mo_or_dn, **kwargs)
| true
| true
|
1c4140b8f07e0f6bb144293974f33c848b5399b7
| 792
|
py
|
Python
|
django_autoutils/serializer_utils.py
|
rezazeiny/django-autoutils
|
268181cbe2e08b2ea042ec02e4145b5578218382
|
[
"MIT"
] | null | null | null |
django_autoutils/serializer_utils.py
|
rezazeiny/django-autoutils
|
268181cbe2e08b2ea042ec02e4145b5578218382
|
[
"MIT"
] | null | null | null |
django_autoutils/serializer_utils.py
|
rezazeiny/django-autoutils
|
268181cbe2e08b2ea042ec02e4145b5578218382
|
[
"MIT"
] | null | null | null |
"""
Utility helpers for serializers.
"""
from rest_framework.fields import HiddenField, CurrentUserDefault
class ContextFieldSerializer:
"""
    Serializer default callable that returns the value stored under ``field`` in the serializer context.
"""
requires_context = True
def __init__(self, field):
self.field = field
def __call__(self, serializer_field):
return serializer_field.context[self.field]
def __repr__(self):
return f'{self.__class__.__name__}()'
def get_hidden_field(field):
"""
    Use this function as a serializer field default; pass the data through the serializer context.
"""
return HiddenField(default=ContextFieldSerializer(field))
def get_current_user_default():
"""
    Use this function as a serializer field default to access the current user.
"""
return HiddenField(default=CurrentUserDefault())
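# --- Editor's example (hedged sketch; not part of the original module). ---
# Minimal DRF serializer using the helpers above. `ProfileSerializer`, the
# 'organization' context key and the `nickname` field are hypothetical; the
# view is expected to instantiate it with
# context={'organization': org, 'request': request}.
from rest_framework import serializers
class ProfileSerializer(serializers.Serializer):
    # Filled from the serializer context, never from client input.
    organization = get_hidden_field('organization')
    # Filled with request.user via CurrentUserDefault.
    user = get_current_user_default()
    nickname = serializers.CharField(max_length=50)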
| 22.628571
| 72
| 0.685606
|
from rest_framework.fields import HiddenField, CurrentUserDefault
class ContextFieldSerializer:
requires_context = True
def __init__(self, field):
self.field = field
def __call__(self, serializer_field):
return serializer_field.context[self.field]
def __repr__(self):
return f'{self.__class__.__name__}()'
def get_hidden_field(field):
return HiddenField(default=ContextFieldSerializer(field))
def get_current_user_default():
return HiddenField(default=CurrentUserDefault())
| true
| true
|
1c41413e201c255b13f6b59e8dcfabfe09c3066f
| 7,725
|
py
|
Python
|
intuitlib/client.py
|
nicholas-ramsey/oauth-pythonclient
|
6a40386a17cc913828b1d9c420cc91f937813299
|
[
"Apache-2.0"
] | null | null | null |
intuitlib/client.py
|
nicholas-ramsey/oauth-pythonclient
|
6a40386a17cc913828b1d9c420cc91f937813299
|
[
"Apache-2.0"
] | null | null | null |
intuitlib/client.py
|
nicholas-ramsey/oauth-pythonclient
|
6a40386a17cc913828b1d9c420cc91f937813299
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 Intuit
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import requests
from future.moves.urllib.parse import urlencode
from intuitlib.utils import (
get_discovery_doc,
generate_token,
scopes_to_string,
get_auth_header,
send_request,
)
class AuthClient(requests.Session):
"""Handles OAuth 2.0 and OpenID Connect flows to get access to User Info API, Accoutning APIs and Payments APIs
"""
def __init__(self, client_id, client_secret, redirect_uri, environment, state_token=None, access_token=None, refresh_token=None, id_token=None, realm_id=None):
"""Constructor for AuthClient
:param client_id: Client ID found in developer account Keys tab
:param client_secret: Client Secret found in developer account Keys tab
:param redirect_uri: Redirect URI, handles callback from provider
:param environment: App Environment, accepted values: 'sandbox','production','prod'
:param state_token: CSRF token, generated if not provided, defaults to None
:param access_token: Access Token for refresh or revoke functionality, defaults to None
:param refresh_token: Refresh Token for refresh or revoke functionality, defaults to None
:param id_token: ID Token for OpenID flow, defaults to None
:param realm_id: QBO Realm/Company ID, defaults to None
"""
super(AuthClient, self).__init__()
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.environment = environment
self.state_token = state_token
# Discovery doc contains endpoints based on environment specified
discovery_doc = get_discovery_doc(self.environment, session=self)
self.auth_endpoint = discovery_doc['authorization_endpoint']
self.token_endpoint = discovery_doc['token_endpoint']
self.revoke_endpoint = discovery_doc['revocation_endpoint']
self.issuer_uri = discovery_doc['issuer']
self.jwks_uri = discovery_doc['jwks_uri']
self.user_info_url = discovery_doc['userinfo_endpoint']
# response values
self.realm_id = realm_id
self.access_token = access_token
self.expires_in = None
self.refresh_token = refresh_token
self.x_refresh_token_expires_in = None
self.id_token = id_token
def setAuthorizeURLs(self, urlObject):
"""Set authorization url using custom values passed in the data dict
:param **data: data dict for custom authorizationURLS
:return: self
"""
if urlObject is not None:
self.auth_endpoint = urlObject['auth_endpoint']
self.token_endpoint = urlObject['token_endpoint']
self.revoke_endpoint = urlObject['revoke_endpoint']
self.user_info_url = urlObject['user_info_url']
return None
def get_authorization_url(self, scopes, state_token=None):
"""Generates authorization url using scopes specified where user is redirected to
:param scopes: Scopes for OAuth/OpenId flow
:type scopes: list of enum, `intuitlib.enums.Scopes`
:param state_token: CSRF token, defaults to None
:return: Authorization url
"""
state = state_token or self.state_token
if state is None:
state = generate_token()
self.state_token = state
url_params = {
'client_id': self.client_id,
'response_type': 'code',
'scope': scopes_to_string(scopes),
'redirect_uri': self.redirect_uri,
'state': self.state_token
}
return '?'.join([self.auth_endpoint, urlencode(url_params)])
def get_bearer_token(self, auth_code, realm_id=None):
"""Gets access_token and refresh_token using authorization code
:param auth_code: Authorization code received from redirect_uri
:param realm_id: Realm ID/Company ID of the QBO company
:raises `intuitlib.exceptions.AuthClientError`: if response status != 200
"""
realm = realm_id or self.realm_id
if realm is not None:
self.realm_id = realm
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': get_auth_header(self.client_id, self.client_secret)
}
body = {
'grant_type': 'authorization_code',
'code': auth_code,
'redirect_uri': self.redirect_uri
}
send_request('POST', self.token_endpoint, headers, self, body=urlencode(body), session=self)
def refresh(self, refresh_token=None):
"""Gets fresh access_token and refresh_token
:param refresh_token: Refresh Token
:raises ValueError: if Refresh Token value not specified
:raises `intuitlib.exceptions.AuthClientError`: if response status != 200
"""
token = refresh_token or self.refresh_token
if token is None:
raise ValueError('Refresh token not specified')
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': get_auth_header(self.client_id, self.client_secret)
}
body = {
'grant_type': 'refresh_token',
'refresh_token': token
}
send_request('POST', self.token_endpoint, headers, self, body=urlencode(body), session=self)
def revoke(self, token=None):
"""Revokes access to QBO company/User Info using either valid Refresh Token or Access Token
:param token: Refresh Token or Access Token to revoke
:raises ValueError: if Refresh Token or Access Token value not specified
:raises `intuitlib.exceptions.AuthClientError`: if response status != 200
:return: True if token successfully revoked
"""
token_to_revoke = token or self.refresh_token or self.access_token
if token_to_revoke is None:
raise ValueError('Token to revoke not specified')
headers = {
'Content-Type': 'application/json',
'Authorization': get_auth_header(self.client_id, self.client_secret)
}
body = {
'token': token_to_revoke
}
send_request('POST', self.revoke_endpoint, headers, self, body=json.dumps(body), session=self)
return True
def get_user_info(self, access_token=None):
"""Gets User Info based on OpenID scopes specified
:param access_token: Access token
:raises ValueError: if Refresh Token or Access Token value not specified
:raises `intuitlib.exceptions.AuthClientError`: if response status != 200
:return: Requests object
"""
token = access_token or self.access_token
if token is None:
            raise ValueError('Access token not specified')
headers = {
'Authorization': 'Bearer {0}'.format(token)
}
return send_request('GET', self.user_info_url, headers, self, session=self)
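# --- Editor's example (hedged sketch; not part of the original module). ---
# End-to-end shape of the OAuth 2.0 flow with the client above. The client id,
# client secret and redirect URI are placeholders; `auth_code` and `realm_id`
# are the values returned on the redirect.
from intuitlib.enums import Scopes
def example_flow(auth_code, realm_id):
    client = AuthClient(
        'CLIENT_ID',
        'CLIENT_SECRET',
        'https://example.com/callback',
        'sandbox',
    )
    # 1. Send the user to this URL to grant access.
    url = client.get_authorization_url([Scopes.ACCOUNTING])
    # 2. After the redirect, exchange the authorization code for tokens.
    client.get_bearer_token(auth_code, realm_id=realm_id)
    return url, client.access_token, client.refresh_token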
| 38.625
| 163
| 0.660194
|
from __future__ import absolute_import
import json
import requests
from future.moves.urllib.parse import urlencode
from intuitlib.utils import (
get_discovery_doc,
generate_token,
scopes_to_string,
get_auth_header,
send_request,
)
class AuthClient(requests.Session):
def __init__(self, client_id, client_secret, redirect_uri, environment, state_token=None, access_token=None, refresh_token=None, id_token=None, realm_id=None):
super(AuthClient, self).__init__()
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.environment = environment
self.state_token = state_token
discovery_doc = get_discovery_doc(self.environment, session=self)
self.auth_endpoint = discovery_doc['authorization_endpoint']
self.token_endpoint = discovery_doc['token_endpoint']
self.revoke_endpoint = discovery_doc['revocation_endpoint']
self.issuer_uri = discovery_doc['issuer']
self.jwks_uri = discovery_doc['jwks_uri']
self.user_info_url = discovery_doc['userinfo_endpoint']
self.realm_id = realm_id
self.access_token = access_token
self.expires_in = None
self.refresh_token = refresh_token
self.x_refresh_token_expires_in = None
self.id_token = id_token
def setAuthorizeURLs(self, urlObject):
if urlObject is not None:
self.auth_endpoint = urlObject['auth_endpoint']
self.token_endpoint = urlObject['token_endpoint']
self.revoke_endpoint = urlObject['revoke_endpoint']
self.user_info_url = urlObject['user_info_url']
return None
def get_authorization_url(self, scopes, state_token=None):
state = state_token or self.state_token
if state is None:
state = generate_token()
self.state_token = state
url_params = {
'client_id': self.client_id,
'response_type': 'code',
'scope': scopes_to_string(scopes),
'redirect_uri': self.redirect_uri,
'state': self.state_token
}
return '?'.join([self.auth_endpoint, urlencode(url_params)])
def get_bearer_token(self, auth_code, realm_id=None):
realm = realm_id or self.realm_id
if realm is not None:
self.realm_id = realm
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': get_auth_header(self.client_id, self.client_secret)
}
body = {
'grant_type': 'authorization_code',
'code': auth_code,
'redirect_uri': self.redirect_uri
}
send_request('POST', self.token_endpoint, headers, self, body=urlencode(body), session=self)
def refresh(self, refresh_token=None):
token = refresh_token or self.refresh_token
if token is None:
raise ValueError('Refresh token not specified')
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': get_auth_header(self.client_id, self.client_secret)
}
body = {
'grant_type': 'refresh_token',
'refresh_token': token
}
send_request('POST', self.token_endpoint, headers, self, body=urlencode(body), session=self)
def revoke(self, token=None):
token_to_revoke = token or self.refresh_token or self.access_token
if token_to_revoke is None:
raise ValueError('Token to revoke not specified')
headers = {
'Content-Type': 'application/json',
'Authorization': get_auth_header(self.client_id, self.client_secret)
}
body = {
'token': token_to_revoke
}
send_request('POST', self.revoke_endpoint, headers, self, body=json.dumps(body), session=self)
return True
def get_user_info(self, access_token=None):
token = access_token or self.access_token
if token is None:
            raise ValueError('Access token not specified')
headers = {
'Authorization': 'Bearer {0}'.format(token)
}
return send_request('GET', self.user_info_url, headers, self, session=self)
| true
| true
|
1c414405098f4d8de3926b8b81db921ab8604753
| 619
|
py
|
Python
|
python/send_udp.py
|
zhangwp-cn/study-notes
|
2c4a136741288fa9ce1a480ce5e03c5a0ca48880
|
[
"Apache-2.0"
] | null | null | null |
python/send_udp.py
|
zhangwp-cn/study-notes
|
2c4a136741288fa9ce1a480ce5e03c5a0ca48880
|
[
"Apache-2.0"
] | null | null | null |
python/send_udp.py
|
zhangwp-cn/study-notes
|
2c4a136741288fa9ce1a480ce5e03c5a0ca48880
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import socket
def main():
    # Create a UDP socket
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # The socket can now be used to send and receive data:
    # udp_socket.sendto(data (must be bytes), (peer ip, port))
    # udp_socket.sendto(b'test', ('127.0.0.1', 8000))
    while True:
        # Read data from the keyboard (second method)
        send_data = input('Enter the content to send: ')  # input() replaces the Python 2-only raw_input()
        # If the input is 'exit', quit the program
        if send_data == 'exit':
            break
        udp_socket.sendto(send_data.encode('utf-8'), ('127.0.0.1', 8000))
        print('----22------')
    # Close the socket
udp_socket.close()
if __name__ == '__main__':
main()
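# --- Editor's example (hedged sketch; not part of the original script). ---
# A matching receiver for the sender above: bind to the same port and print
# whatever arrives. Run it in a second terminal before sending.
def receiver():
    recv_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    recv_socket.bind(('127.0.0.1', 8000))
    try:
        while True:
            data, addr = recv_socket.recvfrom(1024)
            print(addr, data.decode('utf-8'))
    finally:
        recv_socket.close()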
| 22.925926
| 73
| 0.588045
|
import socket
def main():
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
        send_data = input('Enter the content to send: ')
if send_data == 'exit':
break
udp_socket.sendto(send_data.encode('utf-8'), ('127.0.0.1', 8000))
print('----22------')
udp_socket.close()
if __name__ == '__main__':
main()
| true
| true
|
1c4144f7a7b43a32656b17956b5c813f53af84c4
| 57
|
py
|
Python
|
TD3/TD3.py
|
jordan-schneider/TD3
|
39d368e1e3412f97785bdd55a90ef06691e9ad0e
|
[
"MIT"
] | null | null | null |
TD3/TD3.py
|
jordan-schneider/TD3
|
39d368e1e3412f97785bdd55a90ef06691e9ad0e
|
[
"MIT"
] | null | null | null |
TD3/TD3.py
|
jordan-schneider/TD3
|
39d368e1e3412f97785bdd55a90ef06691e9ad0e
|
[
"MIT"
] | null | null | null |
from .td3_arch import Td3 as TD3
from .td3_arch import *
| 19
| 32
| 0.77193
|
from .td3_arch import Td3 as TD3
from .td3_arch import *
| true
| true
|
1c4146664f300917a2b0a45b90930582a8f1102a
| 732
|
py
|
Python
|
Math/NumberSystems/ChangeBases.py
|
xdanielsb/Marathon-book
|
620f1eb9ce54fc05a923e087ef1b130c98251b60
|
[
"MIT"
] | 4
|
2017-01-15T04:59:55.000Z
|
2018-04-06T19:51:49.000Z
|
Math/NumberSystems/ChangeBases.py
|
xdanielsb/MarathonBook
|
620f1eb9ce54fc05a923e087ef1b130c98251b60
|
[
"MIT"
] | 1
|
2017-02-21T01:00:51.000Z
|
2017-03-06T03:24:27.000Z
|
Math/NumberSystems/ChangeBases.py
|
xdanielsb/MarathonBook
|
620f1eb9ce54fc05a923e087ef1b130c98251b60
|
[
"MIT"
] | null | null | null |
# coding=utf-8
""" CHANGE THE BASE OF A NUMBER
ob -> origin base
    od -> destination base
"""
chars = "0123456789ABCDEFGHIJKLMNÑOPQRSTUVWXYZ"
def changeBase(number, ob, od):
if ob == 10:
return tob(number, od)
return tob(to10(number,ob),od)
""" FROM ANY BASE TO BASE 10
    n -> number whose decimal digits are read as base-b digits
    b -> base of the number n
    mul -> positional weight accumulated through the recursion
    (only origin bases up to 10 are handled, since digits come from n's decimal form)
"""
def to10(n, b, mul=1):
if n == 0: return 0
return (n % 10)* mul + to10(n // 10, b, mul*b)
"""FROM TEN BASE TO ANOTHER BASE"""
def tob(n, b):
if n == 0: return ''
return tob(n // b, b) + chars[n % b]
def main():
    print(tob(7, 2))
    print(tob(252, 16))
    print(tob(234, 15))
    print(to10(1000, 2))
    print(changeBase(111, 2, 10))
main()
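# --- Editor's worked example (not part of the original file). ---
# How to10 unfolds for changeBase(111, 2, 10): the decimal digits of 111
# are read as base-2 digits.
#   to10(111, 2)    = 1*1 + to10(11, 2, mul=2)
#   to10(11, 2, 2)  = 1*2 + to10(1, 2, mul=4)
#   to10(1, 2, 4)   = 1*4 + 0
#   total           = 1 + 2 + 4 = 7
# tob(7, 2) then rebuilds '111' by repeated division: 7//2=3 r1, 3//2=1 r1, 1//2=0 r1.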
| 22.181818
| 49
| 0.575137
|
chars = "0123456789ABCDEFGHIJKLMNÑOPQRSTUVWXYZ"
def changeBase(number, ob, od):
if ob == 10:
return tob(number, od)
return tob(to10(number,ob),od)
def to10(n, b, mul=1):
if n == 0: return 0
return (n % 10)* mul + to10(n // 10, b, mul*b)
def tob(n, b):
if n == 0: return ''
return tob(n // b, b) + chars[n % b]
def main():
    print(tob(7, 2))
    print(tob(252, 16))
    print(tob(234, 15))
    print(to10(1000, 2))
    print(changeBase(111, 2, 10))
main()
| true
| true
|
1c4146794a59767ffd3142e8770516dba7b49a0a
| 1,085
|
py
|
Python
|
Database_test_git.py
|
prashankkadam/Maer_1
|
e201866429a1231df7f439797ef100f9e4e6da37
|
[
"MIT"
] | null | null | null |
Database_test_git.py
|
prashankkadam/Maer_1
|
e201866429a1231df7f439797ef100f9e4e6da37
|
[
"MIT"
] | null | null | null |
Database_test_git.py
|
prashankkadam/Maer_1
|
e201866429a1231df7f439797ef100f9e4e6da37
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 12:07:20 2019
This piece of software is bound by The MIT License (MIT)
Copyright (c) 2019 Prashank Kadam
Code written by : Prashank Kadam
User name - ADM-PKA187
Email ID : prashank.kadam@maersktankers.com
Created on - Tue Jul 30 09:12:00 2019
version : 1.0
"""
import pyodbc
import pandas as pd
conn = pyodbc.connect('Driver={########################};'
'Server=##########################;'
'Database=########################;'
'uid=####################;'
'pwd=######################;'
'Trusted_Connection=###;')
cursor = conn.cursor()
# Getting all the tables in the database
# for table_name in cursor.tables(tableType='TABLE'):
#     print(table_name)
# Converting the SELECT query result into a pandas DataFrame
sql = 'SELECT TOP 10 * FROM db.table'
data = pd.read_sql(sql, conn)
print(data.columns)
data.to_excel(r'C:\Users\ADM-PKA187\Desktop\Dastabase\db_test.xlsx')
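# --- Editor's example (hedged sketch; not part of the original script). ---
# Parameterized variant of the query above. pyodbc uses the qmark ('?')
# placeholder style; 'db.table' and 'some_column' are hypothetical names.
def read_filtered(connection, value):
    sql = 'SELECT * FROM db.table WHERE some_column = ?'
    # pandas forwards `params` to the underlying pyodbc cursor.
    return pd.read_sql(sql, connection, params=[value])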
| 27.125
| 69
| 0.530876
|
import pyodbc
import pandas as pd
conn = pyodbc.connect('Driver={########################};'
'Server=##########################;'
'Database=########################;'
'uid=####################;'
'pwd=######################;'
'Trusted_Connection=###;')
cursor = conn.cursor()
sql = 'SELECT TOP 10 * FROM db.table'
data = pd.read_sql(sql, conn)
print(data.columns)
data.to_excel(r'C:\Users\ADM-PKA187\Desktop\Dastabase\db_test.xlsx')
| true
| true
|
1c414684afb6797e41e60598c7a2d4084b0e1a4a
| 155
|
py
|
Python
|
config.py
|
TomBursch/Custom-Physics
|
d3a8eed2ac0129de6c6716be5ddd8e570fcb8c7d
|
[
"MIT"
] | null | null | null |
config.py
|
TomBursch/Custom-Physics
|
d3a8eed2ac0129de6c6716be5ddd8e570fcb8c7d
|
[
"MIT"
] | null | null | null |
config.py
|
TomBursch/Custom-Physics
|
d3a8eed2ac0129de6c6716be5ddd8e570fcb8c7d
|
[
"MIT"
] | null | null | null |
def can_build(env, platform):
return True
def configure(env):
pass
def get_doc_classes():
return [
]
def get_doc_path():
return ""
| 11.071429
| 29
| 0.625806
|
def can_build(env, platform):
return True
def configure(env):
pass
def get_doc_classes():
return [
]
def get_doc_path():
return ""
| true
| true
|
1c41469b5ae72bc6e196a08036b5af82bec49d17
| 49,782
|
py
|
Python
|
discord/channel.py
|
wasi-master/discord.py
|
99aa562ca1c206ee0c41f0fc64599a293af45855
|
[
"MIT"
] | null | null | null |
discord/channel.py
|
wasi-master/discord.py
|
99aa562ca1c206ee0c41f0fc64599a293af45855
|
[
"MIT"
] | null | null | null |
discord/channel.py
|
wasi-master/discord.py
|
99aa562ca1c206ee0c41f0fc64599a293af45855
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import time
import asyncio
import discord.abc
from .permissions import Permissions
from .enums import ChannelType, try_enum, VoiceRegion, PartyType
from .mixins import Hashable
from . import utils
from .asset import Asset
from .errors import ClientException, NoMoreItems, InvalidArgument
__all__ = (
'TextChannel',
'VoiceChannel',
'StageChannel',
'DMChannel',
'CategoryChannel',
'StoreChannel',
'GroupChannel',
'_channel_factory',
)
async def _single_delete_strategy(messages):
for m in messages:
await m.delete()
class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
"""Represents a Discord guild text channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns the channel's name.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`Guild`
The guild the channel belongs to.
id: :class:`int`
The channel ID.
category_id: Optional[:class:`int`]
The category channel ID this channel belongs to, if applicable.
topic: Optional[:class:`str`]
The channel's topic. ``None`` if it doesn't exist.
position: :class:`int`
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0.
last_message_id: Optional[:class:`int`]
The last message ID of the message sent to this channel. It may
*not* point to an existing or valid message.
slowmode_delay: :class:`int`
The number of seconds a member must wait between sending messages
in this channel. A value of `0` denotes that it is disabled.
Bots and users with :attr:`~Permissions.manage_channels` or
:attr:`~Permissions.manage_messages` bypass slowmode.
"""
__slots__ = ('name', 'id', 'guild', 'topic', '_state', 'nsfw',
'category_id', 'position', 'slowmode_delay', '_overwrites',
'_type', 'last_message_id')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._type = data['type']
self._update(guild, data)
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('position', self.position),
('nsfw', self.nsfw),
('news', self.is_news()),
('category_id', self.category_id)
]
joined = ' '.join('%s=%r' % t for t in attrs)
return f'<{self.__class__.__name__} {joined}>'
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.topic = data.get('topic')
self.position = data['position']
self.nsfw = data.get('nsfw', False)
# Does this need coercion into `int`? No idea yet.
self.slowmode_delay = data.get('rate_limit_per_user', 0)
self._type = data.get('type', self._type)
self.last_message_id = utils._get_as_snowflake(data, 'last_message_id')
self._fill_overwrites(data)
async def _get_channel(self):
return self
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return try_enum(ChannelType, self._type)
@property
def _sorting_bucket(self):
return ChannelType.text.value
@property
def can_send(self):
""":class:`bool`: Checks if the bot can send messages
.. versionadded:: 1.5.0.2"""
return self.permissions_for(self.guild.me).send_messages
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
# text channels do not have voice related permissions
denied = Permissions.voice()
base.value &= ~denied.value
return base
@property
def members(self):
"""List[:class:`Member`]: Returns all members that can see this channel."""
return [m for m in self.guild.members if self.permissions_for(m).read_messages]
def is_nsfw(self):
""":class:`bool`: Checks if the channel is NSFW."""
return self.nsfw
def is_news(self):
""":class:`bool`: Checks if the channel is a news channel."""
return self._type == ChannelType.news.value
@property
def last_message(self):
"""Fetches the last message from this channel in cache.
The message might not be valid or point to an existing message.
.. admonition:: Reliable Fetching
:class: helpful
For a slightly more reliable method of fetching the
last message, consider using either :meth:`history`
or :meth:`fetch_message` with the :attr:`last_message_id`
attribute.
Returns
---------
Optional[:class:`Message`]
The last message in this channel or ``None`` if not found.
"""
return self._state._get_message(self.last_message_id) if self.last_message_id else None
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
.. versionchanged:: 1.3
The ``overwrites`` keyword-only parameter was added.
.. versionchanged:: 1.4
The ``type`` keyword-only parameter was added.
Parameters
----------
name: :class:`str`
The new channel name.
topic: :class:`str`
The new channel's topic.
position: :class:`int`
The new channel's position.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
sync_permissions: :class:`bool`
Whether to sync permissions with the channel's new or pre-existing
category. Defaults to ``False``.
category: Optional[:class:`CategoryChannel`]
The new category for this channel. Can be ``None`` to remove the
category.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel, in seconds.
A value of `0` disables slowmode. The maximum value possible is `21600`.
type: :class:`ChannelType`
Change the type of this text channel. Currently, only conversion between
:attr:`ChannelType.text` and :attr:`ChannelType.news` is supported. This
is only available to guilds that contain ``NEWS`` in :attr:`Guild.features`.
reason: Optional[:class:`str`]
The reason for editing this channel. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
Raises
------
InvalidArgument
If position is less than 0 or greater than the number of channels, or if
the permission overwrite information is not in proper form.
Forbidden
You do not have permissions to edit the channel.
HTTPException
Editing the channel failed.
"""
await self._edit(options, reason=reason)
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'topic': self.topic,
'nsfw': self.nsfw,
'rate_limit_per_user': self.slowmode_delay
}, name=name, reason=reason)
async def delete_messages(self, messages):
"""|coro|
Deletes a list of messages. This is similar to :meth:`Message.delete`
except it bulk deletes multiple messages.
As a special case, if the number of messages is 0, then nothing
is done. If the number of messages is 1 then single message
        delete is done. If it's two or more, then bulk delete is used.
        You cannot bulk delete more than 100 messages or messages that
        are older than 14 days.
You must have the :attr:`~Permissions.manage_messages` permission to
use this.
Usable only by bot accounts.
Parameters
-----------
messages: Iterable[:class:`abc.Snowflake`]
An iterable of messages denoting which ones to bulk delete.
Raises
------
ClientException
The number of messages to delete was more than 100.
Forbidden
You do not have proper permissions to delete the messages or
you're not using a bot account.
NotFound
If single delete, then the message was already deleted.
HTTPException
Deleting the messages failed.
"""
if not isinstance(messages, (list, tuple)):
messages = list(messages)
if len(messages) == 0:
return # do nothing
if len(messages) == 1:
message_id = messages[0].id
await self._state.http.delete_message(self.id, message_id)
return
if len(messages) > 100:
raise ClientException('Can only bulk delete messages up to 100 messages')
message_ids = [m.id for m in messages]
await self._state.http.delete_messages(self.id, message_ids)
async def purge(self, *, limit=100, check=None, before=None, after=None, around=None, oldest_first=False, bulk=True):
"""|coro|
Purges a list of messages that meet the criteria given by the predicate
``check``. If a ``check`` is not provided then all messages are deleted
without discrimination.
You must have the :attr:`~Permissions.manage_messages` permission to
delete messages even if they are your own (unless you are a user
account). The :attr:`~Permissions.read_message_history` permission is
also needed to retrieve message history.
Examples
---------
Deleting bot's messages ::
def is_me(m):
return m.author == client.user
deleted = await channel.purge(limit=100, check=is_me)
await channel.send('Deleted {} message(s)'.format(len(deleted)))
Parameters
-----------
limit: Optional[:class:`int`]
The number of messages to search through. This is not the number
of messages that will be deleted, though it can be.
check: Callable[[:class:`Message`], :class:`bool`]
The function used to check if a message should be deleted.
It must take a :class:`Message` as its sole parameter.
before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Same as ``before`` in :meth:`history`.
after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Same as ``after`` in :meth:`history`.
around: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Same as ``around`` in :meth:`history`.
oldest_first: Optional[:class:`bool`]
Same as ``oldest_first`` in :meth:`history`.
bulk: :class:`bool`
If ``True``, use bulk delete. Setting this to ``False`` is useful for mass-deleting
a bot's own messages without :attr:`Permissions.manage_messages`. When ``True``, will
fall back to single delete if messages are older than two weeks.
Raises
-------
Forbidden
You do not have proper permissions to do the actions required.
HTTPException
Purging the messages failed.
Returns
--------
List[:class:`.Message`]
The list of messages that were deleted.
"""
if check is None:
check = lambda m: True
iterator = self.history(limit=limit, before=before, after=after, oldest_first=oldest_first, around=around)
ret = []
count = 0
minimum_time = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
strategy = self.delete_messages if bulk else _single_delete_strategy
while True:
try:
msg = await iterator.next()
except NoMoreItems:
# no more messages to poll
if count >= 2:
# more than 2 messages -> bulk delete
to_delete = ret[-count:]
await strategy(to_delete)
elif count == 1:
# delete a single message
await ret[-1].delete()
return ret
else:
if count == 100:
# we've reached a full 'queue'
to_delete = ret[-100:]
await strategy(to_delete)
count = 0
await asyncio.sleep(1)
if check(msg):
if msg.id < minimum_time:
                    # older than 14 days
if count == 1:
await ret[-1].delete()
elif count >= 2:
to_delete = ret[-count:]
await strategy(to_delete)
count = 0
strategy = _single_delete_strategy
count += 1
ret.append(msg)
async def webhooks(self):
"""|coro|
Gets the list of webhooks from this channel.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
Raises
-------
Forbidden
You don't have permissions to get the webhooks.
Returns
--------
List[:class:`Webhook`]
The webhooks for this channel.
"""
from .webhook import Webhook
data = await self._state.http.channel_webhooks(self.id)
return [Webhook.from_state(d, state=self._state) for d in data]
async def create_webhook(self, *, name, avatar=None, reason=None):
"""|coro|
Creates a webhook for this channel.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
.. versionchanged:: 1.1
Added the ``reason`` keyword-only parameter.
Parameters
-------------
name: :class:`str`
The webhook's name.
avatar: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the webhook's default avatar.
This operates similarly to :meth:`~ClientUser.edit`.
reason: Optional[:class:`str`]
The reason for creating this webhook. Shows up in the audit logs.
Raises
-------
HTTPException
Creating the webhook failed.
Forbidden
You do not have permissions to create a webhook.
Returns
--------
:class:`Webhook`
The created webhook.
"""
from .webhook import Webhook
if avatar is not None:
avatar = utils._bytes_to_base64_data(avatar)
data = await self._state.http.create_webhook(self.id, name=str(name), avatar=avatar, reason=reason)
return Webhook.from_state(data, state=self._state)
async def follow(self, *, destination, reason=None):
"""
Follows a channel using a webhook.
Only news channels can be followed.
.. note::
The webhook returned will not provide a token to do webhook
actions, as Discord does not provide it.
.. versionadded:: 1.3
Parameters
-----------
destination: :class:`TextChannel`
The channel you would like to follow from.
reason: Optional[:class:`str`]
The reason for following the channel. Shows up on the destination guild's audit log.
.. versionadded:: 1.4
Raises
-------
HTTPException
Following the channel failed.
Forbidden
You do not have the permissions to create a webhook.
Returns
--------
:class:`Webhook`
The created webhook.
"""
if not self.is_news():
raise ClientException('The channel must be a news channel.')
if not isinstance(destination, TextChannel):
raise InvalidArgument('Expected TextChannel received {0.__name__}'.format(type(destination)))
from .webhook import Webhook
data = await self._state.http.follow_webhook(self.id, webhook_channel_id=destination.id, reason=reason)
return Webhook._as_follower(data, channel=destination, user=self._state.user)
def get_partial_message(self, message_id):
"""Creates a :class:`PartialMessage` from the message ID.
This is useful if you want to work with a message and only have its ID without
doing an unnecessary API call.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to create a partial message for.
Returns
---------
:class:`PartialMessage`
The partial message.
"""
from .message import PartialMessage
return PartialMessage(channel=self, id=message_id)
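# --- Editor's example (hedged sketch; not part of the original library file). ---
# Common TextChannel patterns documented above: acting on a message by ID
# without fetching it, bulk-deleting a batch of recent messages, and creating
# a webhook. `channel` is assumed to be a TextChannel obtained elsewhere.
async def delete_by_id(channel, message_id):
    partial = channel.get_partial_message(message_id)
    await partial.delete()  # one HTTP call, no fetch needed
async def clear_recent(channel, amount=50):
    messages = await channel.history(limit=amount).flatten()
    await channel.delete_messages(messages)  # bulk delete: max 100, under 14 days old
async def make_announcer(channel):
    webhook = await channel.create_webhook(name='Example Hook')
    await webhook.send('Hello from a webhook!')  # bot-created webhooks carry a token
    return webhook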
class VocalGuildChannel(discord.abc.Connectable, discord.abc.GuildChannel, Hashable):
__slots__ = ('name', 'id', 'guild', 'bitrate', 'user_limit',
'_state', 'position', '_overwrites', 'category_id',
'rtc_region')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def _get_voice_client_key(self):
return self.guild.id, 'guild_id'
def _get_voice_state_pair(self):
return self.guild.id, self.id
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.rtc_region = data.get('rtc_region')
if self.rtc_region:
self.rtc_region = try_enum(VoiceRegion, self.rtc_region)
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.position = data['position']
self.bitrate = data.get('bitrate')
self.user_limit = data.get('user_limit')
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.voice.value
@property
def members(self):
"""List[:class:`Member`]: Returns all members that are currently inside this voice channel."""
ret = []
for user_id, state in self.guild._voice_states.items():
if state.channel and state.channel.id == self.id:
member = self.guild.get_member(user_id)
if member is not None:
ret.append(member)
return ret
@property
def voice_states(self):
"""Returns a mapping of member IDs who have voice states in this channel.
.. versionadded:: 1.3
.. note::
This function is intentionally low level to replace :attr:`members`
when the member cache is unavailable.
Returns
--------
Mapping[:class:`int`, :class:`VoiceState`]
The mapping of member ID to a voice state.
"""
return {key: value for key, value in self.guild._voice_states.items() if value.channel.id == self.id}
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
# voice channels cannot be edited by people who can't connect to them
# It also implicitly denies all other voice perms
if not base.connect:
denied = Permissions.voice()
denied.update(manage_channels=True, manage_roles=True)
base.value &= ~denied.value
return base
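# --- Editor's example (hedged sketch; not part of the original library file). ---
# Uses the `members` property above; assumes the member cache is populated
# (members intent enabled), otherwise prefer the lower-level `voice_states`.
def connected_humans(vocal_channel):
    # Members currently connected to the channel, excluding bots.
    return [m for m in vocal_channel.members if not m.bot]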
class VoiceChannel(VocalGuildChannel):
"""Represents a Discord guild voice channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns the channel's name.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`Guild`
The guild the channel belongs to.
id: :class:`int`
The channel ID.
category_id: Optional[:class:`int`]
The category channel ID this channel belongs to, if applicable.
position: :class:`int`
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0.
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a voice channel.
rtc_region: Optional[:class:`VoiceRegion`]
The region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
"""
__slots__ = ()
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('rtc_region', self.rtc_region),
('position', self.position),
('bitrate', self.bitrate),
('user_limit', self.user_limit),
('category_id', self.category_id)
]
joined = ' '.join('%s=%r' % t for t in attrs)
return f'<{self.__class__.__name__} {joined}>'
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.voice
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'bitrate': self.bitrate,
'user_limit': self.user_limit
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
.. versionchanged:: 1.3
The ``overwrites`` keyword-only parameter was added.
Parameters
----------
name: :class:`str`
The new channel's name.
bitrate: :class:`int`
The new channel's bitrate.
user_limit: :class:`int`
The new channel's user limit.
position: :class:`int`
The new channel's position.
sync_permissions: :class:`bool`
Whether to sync permissions with the channel's new or pre-existing
category. Defaults to ``False``.
category: Optional[:class:`CategoryChannel`]
The new category for this channel. Can be ``None`` to remove the
category.
reason: Optional[:class:`str`]
The reason for editing this channel. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
rtc_region: Optional[:class:`VoiceRegion`]
The new region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
Raises
------
InvalidArgument
If the permission overwrite information is not in proper form.
Forbidden
You do not have permissions to edit the channel.
HTTPException
Editing the channel failed.
"""
await self._edit(options, reason=reason)
    async def create_party(self, application_id: PartyType, max_age: int = 86400, max_uses: int = 0) -> 'Party':
        # NOTE: 'Party' is referenced as a string because it is neither imported
        # nor defined at this point in the file; it is assumed to exist elsewhere
        # in this fork.
"""|coro|
Creates a party in this voice channel.
.. versionadded:: 1.3
Raises
-------
Forbidden
You do not have permissions to create a party.
HTTPException
Party creation failed.
Returns
--------
:class:`Party`
The created party.
"""
return Party(await self._state.http.create_party(self.id, application_id.value, max_age=max_age, max_uses=max_uses))
class StageChannel(VocalGuildChannel):
"""Represents a Discord guild stage channel.
.. versionadded:: 1.7
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns the channel's name.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`Guild`
The guild the channel belongs to.
id: :class:`int`
The channel ID.
topic: Optional[:class:`str`]
The channel's topic. ``None`` if it isn't set.
category_id: Optional[:class:`int`]
The category channel ID this channel belongs to, if applicable.
position: :class:`int`
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0.
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a stage channel.
rtc_region: Optional[:class:`VoiceRegion`]
The region for the stage channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
"""
__slots__ = ('topic',)
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('topic', self.topic),
('rtc_region', self.rtc_region),
('position', self.position),
('bitrate', self.bitrate),
('user_limit', self.user_limit),
('category_id', self.category_id)
]
joined = ' '.join('%s=%r' % t for t in attrs)
return f'<{self.__class__.__name__} {joined}>'
def _update(self, guild, data):
super()._update(guild, data)
self.topic = data.get('topic')
@property
def requesting_to_speak(self):
"""List[:class:`Member`]: A list of members who are requesting to speak in the stage channel."""
return [member for member in self.members if member.voice.requested_to_speak_at is not None]
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.stage_voice
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'topic': self.topic,
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
Parameters
----------
name: :class:`str`
The new channel's name.
topic: :class:`str`
The new channel's topic.
position: :class:`int`
The new channel's position.
sync_permissions: :class:`bool`
Whether to sync permissions with the channel's new or pre-existing
category. Defaults to ``False``.
category: Optional[:class:`CategoryChannel`]
The new category for this channel. Can be ``None`` to remove the
category.
reason: Optional[:class:`str`]
The reason for editing this channel. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
rtc_region: Optional[:class:`VoiceRegion`]
The new region for the stage channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
Raises
------
InvalidArgument
If the permission overwrite information is not in proper form.
Forbidden
You do not have permissions to edit the channel.
HTTPException
Editing the channel failed.
"""
await self._edit(options, reason=reason)
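# --- Editor's example (hedged sketch; not part of the original library file). ---
# Uses the `requesting_to_speak` property above to list members waiting for
# speaker approval in a stage channel; assumes voice states are cached.
def pending_speakers(stage_channel):
    return [member.display_name for member in stage_channel.requesting_to_speak]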
class CategoryChannel(discord.abc.GuildChannel, Hashable):
"""Represents a Discord channel category.
These are useful to group channels to logical compartments.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the category's hash.
.. describe:: str(x)
Returns the category's name.
Attributes
-----------
name: :class:`str`
The category name.
guild: :class:`Guild`
The guild the category belongs to.
id: :class:`int`
The category channel ID.
position: :class:`int`
The position in the category list. This is a number that starts at 0. e.g. the
top category is position 0.
"""
__slots__ = ('name', 'id', 'guild', 'nsfw', '_state', 'position', '_overwrites', 'category_id')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def __repr__(self):
return '<CategoryChannel id={0.id} name={0.name!r} position={0.position} nsfw={0.nsfw}>'.format(self)
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.nsfw = data.get('nsfw', False)
self.position = data['position']
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.category.value
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.category
def is_nsfw(self):
""":class:`bool`: Checks if the category is NSFW."""
return self.nsfw
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'nsfw': self.nsfw
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
.. versionchanged:: 1.3
The ``overwrites`` keyword-only parameter was added.
Parameters
----------
name: :class:`str`
The new category's name.
position: :class:`int`
The new category's position.
nsfw: :class:`bool`
To mark the category as NSFW or not.
reason: Optional[:class:`str`]
The reason for editing this category. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
Raises
------
InvalidArgument
If position is less than 0 or greater than the number of categories.
Forbidden
You do not have permissions to edit the category.
HTTPException
Editing the category failed.
"""
await self._edit(options=options, reason=reason)
@utils.copy_doc(discord.abc.GuildChannel.move)
async def move(self, **kwargs):
kwargs.pop('category', None)
await super().move(**kwargs)
@property
def channels(self):
"""List[:class:`abc.GuildChannel`]: Returns the channels that are under this category.
These are sorted by the official Discord UI, which places voice channels below the text channels.
"""
def comparator(channel):
return (not isinstance(channel, TextChannel), channel.position)
ret = [c for c in self.guild.channels if c.category_id == self.id]
ret.sort(key=comparator)
return ret
@property
def text_channels(self):
"""List[:class:`TextChannel`]: Returns the text channels that are under this category."""
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, TextChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
@property
def voice_channels(self):
"""List[:class:`VoiceChannel`]: Returns the voice channels that are under this category."""
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, VoiceChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
@property
def stage_channels(self):
"""List[:class:`StageChannel`]: Returns the voice channels that are under this category.
.. versionadded:: 1.7
"""
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, StageChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
async def create_text_channel(self, name, *, overwrites=None, reason=None, **options):
"""|coro|
A shortcut method to :meth:`Guild.create_text_channel` to create a :class:`TextChannel` in the category.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
return await self.guild.create_text_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
async def create_voice_channel(self, name, *, overwrites=None, reason=None, **options):
"""|coro|
A shortcut method to :meth:`Guild.create_voice_channel` to create a :class:`VoiceChannel` in the category.
Returns
-------
:class:`VoiceChannel`
The channel that was just created.
"""
return await self.guild.create_voice_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
async def create_stage_channel(self, name, *, overwrites=None, reason=None, **options):
"""|coro|
A shortcut method to :meth:`Guild.create_stage_channel` to create a :class:`StageChannel` in the category.
.. versionadded:: 1.7
Returns
-------
:class:`StageChannel`
The channel that was just created.
"""
return await self.guild.create_stage_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
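# --- Editor's example (hedged sketch; not part of the original library file). ---
# Scaffolding a category with the shortcut creators documented above;
# the channel names are arbitrary placeholders.
async def scaffold_category(category):
    text = await category.create_text_channel('general')
    voice = await category.create_voice_channel('General Voice')
    return text, voice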
class StoreChannel(discord.abc.GuildChannel, Hashable):
"""Represents a Discord guild store channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns the channel's name.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`Guild`
The guild the channel belongs to.
id: :class:`int`
The channel ID.
category_id: :class:`int`
The category channel ID this channel belongs to.
position: :class:`int`
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0.
"""
__slots__ = ('name', 'id', 'guild', '_state', 'nsfw',
'category_id', 'position', '_overwrites',)
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def __repr__(self):
return '<StoreChannel id={0.id} name={0.name!r} position={0.position} nsfw={0.nsfw}>'.format(self)
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.position = data['position']
self.nsfw = data.get('nsfw', False)
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.text.value
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.store
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
# store channels do not have voice related permissions
denied = Permissions.voice()
base.value &= ~denied.value
return base
def is_nsfw(self):
""":class:`bool`: Checks if the channel is NSFW."""
return self.nsfw
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'nsfw': self.nsfw
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
Parameters
----------
name: :class:`str`
The new channel name.
position: :class:`int`
The new channel's position.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
sync_permissions: :class:`bool`
Whether to sync permissions with the channel's new or pre-existing
category. Defaults to ``False``.
category: Optional[:class:`CategoryChannel`]
The new category for this channel. Can be ``None`` to remove the
category.
reason: Optional[:class:`str`]
The reason for editing this channel. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
.. versionadded:: 1.3
Raises
------
InvalidArgument
If position is less than 0 or greater than the number of channels, or if
the permission overwrite information is not in proper form.
Forbidden
You do not have permissions to edit the channel.
HTTPException
Editing the channel failed.
"""
await self._edit(options, reason=reason)
class DMChannel(discord.abc.Messageable, Hashable):
"""Represents a Discord direct message channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns a string representation of the channel
Attributes
----------
recipient: :class:`User`
The user you are participating with in the direct message channel.
me: :class:`ClientUser`
The user presenting yourself.
id: :class:`int`
The direct message channel ID.
"""
__slots__ = ('id', 'recipient', 'me', '_state')
def __init__(self, *, me, state, data):
self._state = state
self.recipient = state.store_user(data['recipients'][0])
self.me = me
self.id = int(data['id'])
async def _get_channel(self):
return self
def __str__(self):
return f'Direct Message with {self.recipient}'
def __repr__(self):
return '<DMChannel id={0.id} recipient={0.recipient!r}>'.format(self)
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.private
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the direct message channel's creation time in UTC."""
return utils.snowflake_time(self.id)
def permissions_for(self, user=None):
"""Handles permission resolution for a :class:`User`.
This function is there for compatibility with other channel types.
Actual direct messages do not really have the concept of permissions.
This returns all the Text related permissions set to ``True`` except:
- :attr:`~Permissions.send_tts_messages`: You cannot send TTS messages in a DM.
        - :attr:`~Permissions.manage_messages`: You cannot delete others' messages in a DM.
Parameters
-----------
user: :class:`User`
The user to check permissions for. This parameter is ignored
but kept for compatibility.
Returns
--------
:class:`Permissions`
The resolved permissions.
"""
base = Permissions.text()
base.read_messages = True
base.send_tts_messages = False
base.manage_messages = False
return base
def get_partial_message(self, message_id):
"""Creates a :class:`PartialMessage` from the message ID.
This is useful if you want to work with a message and only have its ID without
doing an unnecessary API call.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to create a partial message for.
Returns
---------
:class:`PartialMessage`
The partial message.
"""
from .message import PartialMessage
return PartialMessage(channel=self, id=message_id)
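# A short sketch of working with a DM without an extra fetch (hypothetical;
# not part of the library source). ``dm`` is a DMChannel, e.g. from
# ``user.dm_channel``, and ``message_id`` refers to a message the bot sent.
#
#     async def amend(dm, message_id):
#         partial = dm.get_partial_message(message_id)  # no API call yet
#         await partial.edit(content='updated')         # one API call
#         assert not dm.permissions_for().manage_messages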
class GroupChannel(discord.abc.Messageable, Hashable):
"""Represents a Discord group channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
        Returns a string representation of the channel.
Attributes
----------
recipients: List[:class:`User`]
The users you are participating with in the group channel.
me: :class:`ClientUser`
        The user representing yourself.
id: :class:`int`
The group channel ID.
owner: :class:`User`
The user that owns the group channel.
icon: Optional[:class:`str`]
The group channel's icon hash if provided.
name: Optional[:class:`str`]
The group channel's name if provided.
"""
__slots__ = ('id', 'recipients', 'owner', 'icon', 'name', 'me', '_state')
def __init__(self, *, me, state, data):
self._state = state
self.id = int(data['id'])
self.me = me
self._update_group(data)
def _update_group(self, data):
owner_id = utils._get_as_snowflake(data, 'owner_id')
self.icon = data.get('icon')
self.name = data.get('name')
try:
self.recipients = [self._state.store_user(u) for u in data['recipients']]
except KeyError:
pass
if owner_id == self.me.id:
self.owner = self.me
else:
self.owner = utils.find(lambda u: u.id == owner_id, self.recipients)
async def _get_channel(self):
return self
def __str__(self):
if self.name:
return self.name
if len(self.recipients) == 0:
return 'Unnamed'
return ', '.join(map(lambda x: x.name, self.recipients))
def __repr__(self):
return '<GroupChannel id={0.id} name={0.name!r}>'.format(self)
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.group
@property
def icon_url(self):
""":class:`Asset`: Returns the channel's icon asset if available.
This is equivalent to calling :meth:`icon_url_as` with
the default parameters ('webp' format and a size of 1024).
"""
return self.icon_url_as()
def icon_url_as(self, *, format='webp', size=1024):
"""Returns an :class:`Asset` for the icon the channel has.
The format must be one of 'webp', 'jpeg', 'jpg' or 'png'.
The size must be a power of 2 between 16 and 4096.
.. versionadded:: 2.0
Parameters
-----------
format: :class:`str`
The format to attempt to convert the icon to. Defaults to 'webp'.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_icon(self._state, self, 'channel', format=format, size=size)
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return utils.snowflake_time(self.id)
def permissions_for(self, user):
"""Handles permission resolution for a :class:`User`.
        This function exists for compatibility with other channel types.
        Actual group channels do not really have the concept of permissions.
        This returns all the text-related permissions set to ``True`` except:
        - :attr:`~Permissions.send_tts_messages`: You cannot send TTS messages in a DM.
        - :attr:`~Permissions.manage_messages`: You cannot delete others' messages in a DM.
        This also grants :attr:`~Permissions.kick_members` if the user is the
        owner of the group channel.
Parameters
-----------
user: :class:`User`
The user to check permissions for.
Returns
--------
:class:`Permissions`
The resolved permissions for the user.
"""
base = Permissions.text()
base.read_messages = True
base.send_tts_messages = False
base.manage_messages = False
base.mention_everyone = True
if user.id == self.owner.id:
base.kick_members = True
return base
async def leave(self):
"""|coro|
Leave the group.
If you are the only one in the group, this deletes it as well.
Raises
-------
HTTPException
Leaving the group failed.
"""
await self._state.http.leave_group(self.id)
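# Permission-resolution sketch for group channels (hypothetical; not part of
# the library source): only the owner receives kick_members.
#
#     def can_remove_members(group, user):
#         return group.permissions_for(user).kick_members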
class Party:
"""Represents a party in a voice channel."""
def __init__(self, data):
self.code = data['code']
self.uses = data['uses']
self.max_uses = data['max_uses']
self.max_age = data['max_age']
self.temporary = data['temporary']
self.created_at = utils.parse_time(data.get('created_at'))
        # TODO: add more fields here, such as guild and the raw data: https://mystb.in/AdvertisersExperiencesMothers.json
def __repr__(self):
return f'<Party code={self.code}>'
def __str__(self):
return f'https://discord.gg/{self.code}'
def __eq__(self, other):
return isinstance(other, Party) and self.code == other.code
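# Construction sketch for Party (hypothetical payload; not part of the library
# source). The fields mirror an invite-style payload:
#
#     party = Party({'code': 'abc123', 'uses': 0, 'max_uses': 0,
#                    'max_age': 86400, 'temporary': False})
#     str(party)   # -> 'https://discord.gg/abc123'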
def _channel_factory(channel_type):
value = try_enum(ChannelType, channel_type)
if value is ChannelType.text:
return TextChannel, value
elif value is ChannelType.voice:
return VoiceChannel, value
elif value is ChannelType.private:
return DMChannel, value
elif value is ChannelType.category:
return CategoryChannel, value
elif value is ChannelType.group:
return GroupChannel, value
elif value is ChannelType.news:
return TextChannel, value
elif value is ChannelType.store:
return StoreChannel, value
elif value is ChannelType.stage_voice:
return StageChannel, value
else:
return None, value
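# Factory usage sketch (hypothetical; not part of the library source). Given a
# raw channel type from the gateway, pick the class to instantiate:
#
#     cls, ch_type = _channel_factory(0)  # -> (TextChannel, ChannelType.text)
#     if cls is None:
#         ...  # unknown/unsupported channel type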
| 32.601179
| 124
| 0.596702
|
import time
import asyncio
import discord.abc
from .permissions import Permissions
from .enums import ChannelType, try_enum, VoiceRegion, PartyType
from .mixins import Hashable
from . import utils
from .asset import Asset
from .errors import ClientException, NoMoreItems, InvalidArgument
__all__ = (
'TextChannel',
'VoiceChannel',
'StageChannel',
'DMChannel',
'CategoryChannel',
'StoreChannel',
'GroupChannel',
'_channel_factory',
)
async def _single_delete_strategy(messages):
for m in messages:
await m.delete()
class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
__slots__ = ('name', 'id', 'guild', 'topic', '_state', 'nsfw',
'category_id', 'position', 'slowmode_delay', '_overwrites',
'_type', 'last_message_id')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._type = data['type']
self._update(guild, data)
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('position', self.position),
('nsfw', self.nsfw),
('news', self.is_news()),
('category_id', self.category_id)
]
joined = ' '.join('%s=%r' % t for t in attrs)
return f'<{self.__class__.__name__} {joined}>'
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.topic = data.get('topic')
self.position = data['position']
self.nsfw = data.get('nsfw', False)
self.slowmode_delay = data.get('rate_limit_per_user', 0)
self._type = data.get('type', self._type)
self.last_message_id = utils._get_as_snowflake(data, 'last_message_id')
self._fill_overwrites(data)
async def _get_channel(self):
return self
@property
def type(self):
return try_enum(ChannelType, self._type)
@property
def _sorting_bucket(self):
return ChannelType.text.value
@property
def can_send(self):
return self.permissions_for(self.guild.me).send_messages
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
denied = Permissions.voice()
base.value &= ~denied.value
return base
@property
def members(self):
return [m for m in self.guild.members if self.permissions_for(m).read_messages]
def is_nsfw(self):
return self.nsfw
def is_news(self):
return self._type == ChannelType.news.value
@property
def last_message(self):
return self._state._get_message(self.last_message_id) if self.last_message_id else None
async def edit(self, *, reason=None, **options):
await self._edit(options, reason=reason)
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'topic': self.topic,
'nsfw': self.nsfw,
'rate_limit_per_user': self.slowmode_delay
}, name=name, reason=reason)
async def delete_messages(self, messages):
if not isinstance(messages, (list, tuple)):
messages = list(messages)
if len(messages) == 0:
return
if len(messages) == 1:
message_id = messages[0].id
await self._state.http.delete_message(self.id, message_id)
return
if len(messages) > 100:
raise ClientException('Can only bulk delete messages up to 100 messages')
message_ids = [m.id for m in messages]
await self._state.http.delete_messages(self.id, message_ids)
async def purge(self, *, limit=100, check=None, before=None, after=None, around=None, oldest_first=False, bulk=True):
if check is None:
check = lambda m: True
iterator = self.history(limit=limit, before=before, after=after, oldest_first=oldest_first, around=around)
ret = []
count = 0
minimum_time = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
strategy = self.delete_messages if bulk else _single_delete_strategy
while True:
try:
msg = await iterator.next()
except NoMoreItems:
if count >= 2:
to_delete = ret[-count:]
await strategy(to_delete)
elif count == 1:
await ret[-1].delete()
return ret
else:
if count == 100:
to_delete = ret[-100:]
await strategy(to_delete)
count = 0
await asyncio.sleep(1)
if check(msg):
if msg.id < minimum_time:
# older than 14 days old
if count == 1:
await ret[-1].delete()
elif count >= 2:
to_delete = ret[-count:]
await strategy(to_delete)
count = 0
strategy = _single_delete_strategy
count += 1
ret.append(msg)
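    # Note on ``minimum_time`` above (explanatory only): Discord snowflakes
    # encode milliseconds since the Discord epoch (2015-01-01, 1420070400000 ms
    # UNIX time) in their top bits, i.e. snowflake = ms_since_epoch << 22.
    # Messages older than 14 days are rejected by the bulk-delete endpoint, so
    # ids below that threshold fall back to one-by-one deletion.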
async def webhooks(self):
from .webhook import Webhook
data = await self._state.http.channel_webhooks(self.id)
return [Webhook.from_state(d, state=self._state) for d in data]
async def create_webhook(self, *, name, avatar=None, reason=None):
from .webhook import Webhook
if avatar is not None:
avatar = utils._bytes_to_base64_data(avatar)
data = await self._state.http.create_webhook(self.id, name=str(name), avatar=avatar, reason=reason)
return Webhook.from_state(data, state=self._state)
async def follow(self, *, destination, reason=None):
if not self.is_news():
raise ClientException('The channel must be a news channel.')
if not isinstance(destination, TextChannel):
            raise InvalidArgument('Expected TextChannel, received {0.__name__}'.format(type(destination)))
from .webhook import Webhook
data = await self._state.http.follow_webhook(self.id, webhook_channel_id=destination.id, reason=reason)
return Webhook._as_follower(data, channel=destination, user=self._state.user)
def get_partial_message(self, message_id):
from .message import PartialMessage
return PartialMessage(channel=self, id=message_id)
class VocalGuildChannel(discord.abc.Connectable, discord.abc.GuildChannel, Hashable):
__slots__ = ('name', 'id', 'guild', 'bitrate', 'user_limit',
'_state', 'position', '_overwrites', 'category_id',
'rtc_region')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def _get_voice_client_key(self):
return self.guild.id, 'guild_id'
def _get_voice_state_pair(self):
return self.guild.id, self.id
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.rtc_region = data.get('rtc_region')
if self.rtc_region:
self.rtc_region = try_enum(VoiceRegion, self.rtc_region)
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.position = data['position']
self.bitrate = data.get('bitrate')
self.user_limit = data.get('user_limit')
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.voice.value
@property
def members(self):
ret = []
for user_id, state in self.guild._voice_states.items():
if state.channel and state.channel.id == self.id:
member = self.guild.get_member(user_id)
if member is not None:
ret.append(member)
return ret
@property
def voice_states(self):
return {key: value for key, value in self.guild._voice_states.items() if value.channel.id == self.id}
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
# voice channels cannot be edited by people who can't connect to them
if not base.connect:
denied = Permissions.voice()
denied.update(manage_channels=True, manage_roles=True)
base.value &= ~denied.value
return base
class VoiceChannel(VocalGuildChannel):
__slots__ = ()
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('rtc_region', self.rtc_region),
('position', self.position),
('bitrate', self.bitrate),
('user_limit', self.user_limit),
('category_id', self.category_id)
]
joined = ' '.join('%s=%r' % t for t in attrs)
return f'<{self.__class__.__name__} {joined}>'
@property
def type(self):
return ChannelType.voice
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'bitrate': self.bitrate,
'user_limit': self.user_limit
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
await self._edit(options, reason=reason)
    async def create_party(self, application_id: PartyType, max_age: int = 86400, max_uses: int = 0) -> Party:
return Party(await self._state.http.create_party(self.id, application_id.value, max_age=max_age, max_uses=max_uses))
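# Usage sketch for the create_party extension above (hypothetical; not part of
# upstream discord.py). ``vc`` is a VoiceChannel and ``PartyType.youtube`` a
# hypothetical member; the available members depend on this fork's enums.
#
#     party = await vc.create_party(PartyType.youtube, max_age=3600)
#     str(party)   # invite-style URL: https://discord.gg/<code>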
class StageChannel(VocalGuildChannel):
__slots__ = ('topic',)
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('topic', self.topic),
('rtc_region', self.rtc_region),
('position', self.position),
('bitrate', self.bitrate),
('user_limit', self.user_limit),
('category_id', self.category_id)
]
joined = ' '.join('%s=%r' % t for t in attrs)
return f'<{self.__class__.__name__} {joined}>'
def _update(self, guild, data):
super()._update(guild, data)
self.topic = data.get('topic')
@property
def requesting_to_speak(self):
return [member for member in self.members if member.voice.requested_to_speak_at is not None]
@property
def type(self):
return ChannelType.stage_voice
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'topic': self.topic,
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
await self._edit(options, reason=reason)
class CategoryChannel(discord.abc.GuildChannel, Hashable):
__slots__ = ('name', 'id', 'guild', 'nsfw', '_state', 'position', '_overwrites', 'category_id')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def __repr__(self):
return '<CategoryChannel id={0.id} name={0.name!r} position={0.position} nsfw={0.nsfw}>'.format(self)
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.nsfw = data.get('nsfw', False)
self.position = data['position']
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.category.value
@property
def type(self):
return ChannelType.category
def is_nsfw(self):
return self.nsfw
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'nsfw': self.nsfw
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
await self._edit(options=options, reason=reason)
@utils.copy_doc(discord.abc.GuildChannel.move)
async def move(self, **kwargs):
kwargs.pop('category', None)
await super().move(**kwargs)
@property
def channels(self):
def comparator(channel):
return (not isinstance(channel, TextChannel), channel.position)
ret = [c for c in self.guild.channels if c.category_id == self.id]
ret.sort(key=comparator)
return ret
@property
def text_channels(self):
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, TextChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
@property
def voice_channels(self):
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, VoiceChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
@property
def stage_channels(self):
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, StageChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
async def create_text_channel(self, name, *, overwrites=None, reason=None, **options):
return await self.guild.create_text_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
async def create_voice_channel(self, name, *, overwrites=None, reason=None, **options):
return await self.guild.create_voice_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
async def create_stage_channel(self, name, *, overwrites=None, reason=None, **options):
return await self.guild.create_stage_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
class StoreChannel(discord.abc.GuildChannel, Hashable):
__slots__ = ('name', 'id', 'guild', '_state', 'nsfw',
'category_id', 'position', '_overwrites',)
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def __repr__(self):
return '<StoreChannel id={0.id} name={0.name!r} position={0.position} nsfw={0.nsfw}>'.format(self)
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.position = data['position']
self.nsfw = data.get('nsfw', False)
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.text.value
@property
def type(self):
return ChannelType.store
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
denied = Permissions.voice()
base.value &= ~denied.value
return base
def is_nsfw(self):
return self.nsfw
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'nsfw': self.nsfw
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
await self._edit(options, reason=reason)
class DMChannel(discord.abc.Messageable, Hashable):
__slots__ = ('id', 'recipient', 'me', '_state')
def __init__(self, *, me, state, data):
self._state = state
self.recipient = state.store_user(data['recipients'][0])
self.me = me
self.id = int(data['id'])
async def _get_channel(self):
return self
def __str__(self):
return f'Direct Message with {self.recipient}'
def __repr__(self):
return '<DMChannel id={0.id} recipient={0.recipient!r}>'.format(self)
@property
def type(self):
return ChannelType.private
@property
def created_at(self):
return utils.snowflake_time(self.id)
def permissions_for(self, user=None):
base = Permissions.text()
base.read_messages = True
base.send_tts_messages = False
base.manage_messages = False
return base
def get_partial_message(self, message_id):
from .message import PartialMessage
return PartialMessage(channel=self, id=message_id)
class GroupChannel(discord.abc.Messageable, Hashable):
__slots__ = ('id', 'recipients', 'owner', 'icon', 'name', 'me', '_state')
def __init__(self, *, me, state, data):
self._state = state
self.id = int(data['id'])
self.me = me
self._update_group(data)
def _update_group(self, data):
owner_id = utils._get_as_snowflake(data, 'owner_id')
self.icon = data.get('icon')
self.name = data.get('name')
try:
self.recipients = [self._state.store_user(u) for u in data['recipients']]
except KeyError:
pass
if owner_id == self.me.id:
self.owner = self.me
else:
self.owner = utils.find(lambda u: u.id == owner_id, self.recipients)
async def _get_channel(self):
return self
def __str__(self):
if self.name:
return self.name
if len(self.recipients) == 0:
return 'Unnamed'
return ', '.join(map(lambda x: x.name, self.recipients))
def __repr__(self):
return '<GroupChannel id={0.id} name={0.name!r}>'.format(self)
@property
def type(self):
return ChannelType.group
@property
def icon_url(self):
return self.icon_url_as()
def icon_url_as(self, *, format='webp', size=1024):
return Asset._from_icon(self._state, self, 'channel', format=format, size=size)
@property
def created_at(self):
return utils.snowflake_time(self.id)
def permissions_for(self, user):
base = Permissions.text()
base.read_messages = True
base.send_tts_messages = False
base.manage_messages = False
base.mention_everyone = True
if user.id == self.owner.id:
base.kick_members = True
return base
async def leave(self):
await self._state.http.leave_group(self.id)
class Party:
def __init__(self, data):
self.code = data['code']
self.uses = data['uses']
self.max_uses = data['max_uses']
self.max_age = data['max_age']
self.temporary = data['temporary']
self.created_at = utils.parse_time(data.get('created_at'))
def __repr__(self):
return f'<Party code={self.code}>'
def __str__(self):
return f'https://discord.gg/{self.code}'
def __eq__(self, other):
return isinstance(other, Party) and self.code == other.code
def _channel_factory(channel_type):
value = try_enum(ChannelType, channel_type)
if value is ChannelType.text:
return TextChannel, value
elif value is ChannelType.voice:
return VoiceChannel, value
elif value is ChannelType.private:
return DMChannel, value
elif value is ChannelType.category:
return CategoryChannel, value
elif value is ChannelType.group:
return GroupChannel, value
elif value is ChannelType.news:
return TextChannel, value
elif value is ChannelType.store:
return StoreChannel, value
elif value is ChannelType.stage_voice:
return StageChannel, value
else:
return None, value
| true
| true
|
1c414808ed560fff7c12a3bf6402cdaca7a2f7f6
| 504
|
py
|
Python
|
project_name/wsgi.py
|
se42/djroku-temp1
|
97293faccad1aaf9a7ad0a7ee9d05066ece286c7
|
[
"MIT"
] | 14
|
2016-07-05T15:08:24.000Z
|
2020-10-10T16:52:17.000Z
|
project_name/wsgi.py
|
se42/djroku-temp1
|
97293faccad1aaf9a7ad0a7ee9d05066ece286c7
|
[
"MIT"
] | 1
|
2019-03-14T00:48:17.000Z
|
2019-03-14T00:48:17.000Z
|
project_name/wsgi.py
|
se42/djroku-temp1
|
97293faccad1aaf9a7ad0a7ee9d05066ece286c7
|
[
"MIT"
] | 5
|
2016-07-08T12:42:25.000Z
|
2020-12-15T06:56:17.000Z
|
"""
WSGI config for {{ project_name }} project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
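# With this module in place, any WSGI server can load the ``application``
# callable; a typical (hypothetical) invocation:
#
#     gunicorn {{ project_name }}.wsgi:application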
| 26.526316
| 78
| 0.787698
|
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| true
| true
|
1c41488973978a7c770aed118e7b8cd0c1c01af0
| 36,199
|
py
|
Python
|
pandas/core/indexes/datetimelike.py
|
NeehaK/pandas
|
0815c433b58920c658f1be9c7eb00cf7e75a3e2b
|
[
"BSD-3-Clause"
] | 1
|
2019-04-27T15:15:15.000Z
|
2019-04-27T15:15:15.000Z
|
pandas/core/indexes/datetimelike.py
|
NeehaK/pandas
|
0815c433b58920c658f1be9c7eb00cf7e75a3e2b
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/indexes/datetimelike.py
|
NeehaK/pandas
|
0815c433b58920c658f1be9c7eb00cf7e75a3e2b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Base and utility classes for tseries type pandas objects.
"""
import warnings
import operator
from datetime import datetime, timedelta
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.tools.timedeltas import to_timedelta
import numpy as np
from pandas._libs import lib, iNaT, NaT
from pandas._libs.tslibs.period import Period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import round_ns
from pandas.core.dtypes.common import (
_ensure_int64,
is_dtype_equal,
is_float,
is_integer,
is_list_like,
is_scalar,
is_bool_dtype,
is_offsetlike,
is_categorical_dtype,
is_datetime_or_timedelta_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_string_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
ABCIndex, ABCSeries, ABCDataFrame, ABCPeriodIndex, ABCIndexClass)
from pandas.core.dtypes.missing import isna
from pandas.core import common as com, algorithms, ops
from pandas.core.algorithms import checked_add_with_arr
from pandas.errors import NullFrequencyError, PerformanceWarning
import pandas.io.formats.printing as printing
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import Tick, DateOffset
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
class DatelikeOps(object):
""" common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex """
def strftime(self, date_format):
return np.asarray(self.format(date_format=date_format),
dtype=compat.text_type)
strftime.__doc__ = """
Return an array of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format doc <{0}>`__
Parameters
----------
date_format : str
date format string (e.g. "%Y-%m-%d")
Returns
-------
ndarray of formatted strings
""".format("https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior")
class TimelikeOps(object):
""" common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex """
_round_doc = (
"""
{op} the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
""")
_round_example = (
""">>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
""")
_floor_example = (
""">>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
)
_ceil_example = (
""">>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
)
def _round(self, freq, rounder):
# round the local times
values = _ensure_datetimelike_to_i8(self)
result = round_ns(values, rounder, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
attribs = self._get_attributes_dict()
if 'freq' in attribs:
attribs['freq'] = None
if 'tz' in attribs:
attribs['tz'] = None
return self._ensure_localized(
self._shallow_copy(result, **attribs))
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, *args, **kwargs):
return self._round(freq, np.round)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq):
return self._round(freq, np.floor)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq):
return self._round(freq, np.ceil)
class DatetimeIndexOpsMixin(object):
""" common ops mixin to support a unified interface datetimelike Index """
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, ABCIndexClass):
return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
except Exception:
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
# ToDo: Remove this when PeriodDtype is added
elif isinstance(self, ABCPeriodIndex):
if not isinstance(other, ABCPeriodIndex):
return False
if self.freq != other.freq:
return False
return np.array_equal(self.asi8, other.asi8)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@staticmethod
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
""" create the join wrapper methods """
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
def _evaluate_compare(self, other, op):
"""
        We have been called because of a comparison between
        datetimelike-aware arrays. numpy >= 1.11 will
        now warn about NaT comparisons.
"""
# coerce to a similar object
if not isinstance(other, type(self)):
if not is_list_like(other):
# scalar
other = [other]
elif is_scalar(lib.item_from_zerodim(other)):
# ndarray scalar
other = [other.item()]
other = type(self)(other)
# compare
result = op(self.asi8, other.asi8)
# technically we could support bool dtyped Index
# for now just return the indexing array directly
mask = (self._isnan) | (other._isnan)
if is_bool_dtype(result):
result[mask] = False
return result
result[mask] = iNaT
try:
return Index(result)
except TypeError:
return result
def _ensure_localized(self, result):
"""
ensure that we are re-localized
This is for compat as we can then call this on all datetimelike
indexes generally (ignored for Period/Timedelta)
Parameters
----------
result : DatetimeIndex / i8 ndarray
Returns
-------
localized DTI
"""
# reconvert to local tz
if getattr(self, 'tz', None) is not None:
if not isinstance(result, ABCIndexClass):
result = self._simple_new(result)
result = result.tz_localize(self.tz)
return result
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise com.AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def _box_values_as_index(self):
"""
return object Index which contains boxed values
"""
from pandas.core.index import Index
return Index(self._box_values(self.asi8), name=self.name, dtype=object)
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
try:
res = self.get_loc(key)
return is_scalar(res) or type(res) == slice or np.any(res)
except (KeyError, TypeError, ValueError):
return False
contains = __contains__
def __getitem__(self, key):
"""
        This getitem defers to the underlying array, which by definition can
        only handle list-likes, slices, and integer scalars.
"""
is_int = is_integer(key)
if is_scalar(key) and not is_int:
raise IndexError("only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices")
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
return self._box_func(val)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
attribs = self._get_attributes_dict()
is_period = isinstance(self, ABCPeriodIndex)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
attribs['freq'] = freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, **attribs)
return result
return self._simple_new(result, **attribs)
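    # Slicing behaviour sketch (explanatory only): a stepped slice scales the
    # freq, while fancy indexing drops it. For an hourly DatetimeIndex ``dti``:
    #
    #     dti[::2].freq        # -> <2 * Hours>
    #     dti[[0, 3, 5]].freq  # -> None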
@property
def freqstr(self):
"""
        Return the frequency object as a string if it is set, otherwise None.
"""
if self.freq is None:
return None
return self.freq.freqstr
@cache_readonly
def inferred_freq(self):
"""
        Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
try:
return frequencies.infer_freq(self)
except ValueError:
return None
def _nat_new(self, box=True):
"""
Return Index or ndarray filled with NaT which has the same
length as the caller.
Parameters
----------
box : boolean, default True
            - If True, returns an Index of the same type as the caller.
            - If False, returns an ndarray of np.int64.
"""
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
if not box:
return result
attribs = self._get_attributes_dict()
if not is_period_dtype(self):
attribs['freq'] = None
return self._simple_new(result, **attribs)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, f):
try:
result = f(self)
# Try to use this result if we can
if isinstance(result, np.ndarray):
result = Index(result)
if not isinstance(result, Index):
raise TypeError('The map function must return an Index object')
return result
except Exception:
return self.astype(object).map(f)
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._ndarray_values)
attribs = self._get_attributes_dict()
freq = attribs['freq']
if freq is not None and not isinstance(self, ABCPeriodIndex):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs['freq'] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_int64(indices)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self._assert_take_fillable(self.asi8, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=iNaT)
# keep freq in PeriodIndex, reset otherwise
freq = self.freq if isinstance(self, ABCPeriodIndex) else None
return self._shallow_copy(taken, freq=freq)
def get_duplicates(self):
values = Index.get_duplicates(self)
return self._simple_new(values)
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return (self.asi8 == iNaT)
@property
def asobject(self):
"""Return object Index which contains boxed values.
.. deprecated:: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
"""
warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
" instead", FutureWarning, stacklevel=2)
return self.astype(object)
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance, box=False))
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _maybe_mask_results(self, result, fill_value=None, convert=None):
"""
Parameters
----------
result : a ndarray
convert : string/dtype or None
Returns
-------
        result : ndarray with values replaced by the fill_value
        Masks the result if needed and converts to the provided dtype if it is
        not None.
        This is an internal routine.
"""
if self.hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def tolist(self):
"""
return a list of the underlying data
"""
return list(self.astype(object))
def min(self, axis=None, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See also
--------
numpy.ndarray.min
"""
nv.validate_min(args, kwargs)
try:
i8 = self.asi8
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
min_stamp = self[~self._isnan].asi8.min()
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
def argmin(self, axis=None, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all():
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
def max(self, axis=None, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See also
--------
numpy.ndarray.max
"""
nv.validate_max(args, kwargs)
try:
i8 = self.asi8
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
max_stamp = self[~self._isnan].asi8.max()
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
def argmax(self, axis=None, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all():
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
@property
def _formatter_func(self):
raise com.AbstractMethodError(self)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = super(DatetimeIndexOpsMixin, self)._format_attrs()
for attrib in self._attributes:
if attrib == 'freq':
freq = self.freqstr
if freq is not None:
freq = "'%s'" % freq
attrs.append(('freq', freq))
return attrs
@cache_readonly
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@cache_readonly
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
def _convert_scalar_indexer(self, key, kind=None):
"""
we don't allow integer or float indexing on datetime-like when using
loc
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return (super(DatetimeIndexOpsMixin, self)
._convert_scalar_indexer(key, kind=kind))
def _add_datelike(self, other):
raise TypeError("cannot add {cls} and {typ}"
.format(cls=type(self).__name__,
typ=type(other).__name__))
def _sub_datelike(self, other):
raise com.AbstractMethodError(self)
def _add_nat(self):
"""Add pd.NaT to self"""
if is_period_dtype(self):
raise TypeError('Cannot add {cls} and {typ}'
.format(cls=type(self).__name__,
typ=type(NaT).__name__))
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
return self._nat_new(box=True)
def _sub_nat(self):
"""Subtract pd.NaT from self"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = self._nat_new(box=False)
return result.view('timedelta64[ns]')
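    # NaT arithmetic sketch (explanatory only): for a DatetimeIndex ``dti``,
    #
    #     dti + pd.NaT   # -> all-NaT index of the same length (GH#19124)
    #     dti - pd.NaT   # -> all-NaT values viewed as timedelta64[ns]
    #
    # while PeriodIndex + pd.NaT raises TypeError (see _add_nat above).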
def _sub_period(self, other):
return NotImplemented
def _add_offset(self, offset):
raise com.AbstractMethodError(self)
def _addsub_offset_array(self, other, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : Index, np.ndarray
object-dtype containing pd.DateOffset objects
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn("Adding/subtracting array of DateOffsets to "
"{cls} not vectorized"
.format(cls=type(self).__name__), PerformanceWarning)
res_values = op(self.astype('O').values, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs['freq'] = 'infer'
return self._constructor(res_values, **kwargs)
@classmethod
def _add_datetimelike_methods(cls):
"""
add in the datetimelike methods (as we may have to override the
superclass)
"""
def __add__(self, other):
from pandas import DateOffset
other = lib.item_from_zerodim(other)
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
# scalar others
elif other is NaT:
result = self._add_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._add_datelike(other)
elif is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
result = self.shift(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
result = self._addsub_offset_array(other, operator.add)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
return self._add_datelike(other)
elif is_integer_dtype(other) and self.freq is None:
# GH#19123
raise NullFrequencyError("Cannot shift with no freq")
elif is_float_dtype(other):
# Explicitly catch invalid dtypes
raise TypeError("cannot add {dtype}-dtype to {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
else: # pragma: no cover
return NotImplemented
if result is NotImplemented:
return NotImplemented
elif not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = ops.get_op_result_name(self, other)
result.name = res_name
return result
cls.__add__ = __add__
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
cls.__radd__ = __radd__
def __sub__(self, other):
from pandas import Index
other = lib.item_from_zerodim(other)
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
# scalar others
elif other is NaT:
result = self._sub_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(-other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(-other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._sub_datelike(other)
elif is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
result = self.shift(-other)
elif isinstance(other, Period):
result = self._sub_period(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(-other)
elif is_offsetlike(other):
# Array/Index of DateOffset objects
result = self._addsub_offset_array(other, operator.sub)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
result = self._sub_datelike(other)
elif isinstance(other, Index):
raise TypeError("cannot subtract {cls} and {typ}"
.format(cls=type(self).__name__,
typ=type(other).__name__))
elif is_integer_dtype(other) and self.freq is None:
# GH#19123
raise NullFrequencyError("Cannot shift with no freq")
elif is_float_dtype(other):
# Explicitly catch invalid dtypes
raise TypeError("cannot subtract {dtype}-dtype from {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
else: # pragma: no cover
return NotImplemented
if result is NotImplemented:
return NotImplemented
elif not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = ops.get_op_result_name(self, other)
result.name = res_name
return result
cls.__sub__ = __sub__
def __rsub__(self, other):
if is_datetime64_dtype(other) and is_timedelta64_dtype(self):
# ndarray[datetime64] cannot be subtracted from self, so
# we need to wrap in DatetimeIndex and flip the operation
from pandas import DatetimeIndex
return DatetimeIndex(other) - self
return -(self - other)
cls.__rsub__ = __rsub__
def __iadd__(self, other):
# alias for __add__
return self.__add__(other)
cls.__iadd__ = __iadd__
def __isub__(self, other):
# alias for __sub__
return self.__sub__(other)
cls.__isub__ = __isub__
def _add_delta(self, other):
return NotImplemented
def _add_delta_td(self, other):
"""
        Add a delta of a timedeltalike and
        return the i8 result view.
"""
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc,
arr_mask=self._isnan).view('i8')
if self.hasnans:
new_values[self._isnan] = iNaT
return new_values.view('i8')
def _add_delta_tdi(self, other):
"""
        Add a delta of a TimedeltaIndex and
        return the i8 result view.
"""
# delta operation
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
arr_mask=self._isnan,
b_mask=other._isnan)
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('i8')
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
passed set of values
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
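    # isin coercion sketch (explanatory only): values are first coerced to the
    # caller's own type so the comparison runs on the i8 view, e.g.
    #
    #     dti.isin(['2018-01-01'])   # strings parsed into Timestamps
    #
    # anything that fails that construction falls back to object-dtype isin.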
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
"""
if freq is not None and freq != self.freq:
if isinstance(freq, compat.string_types):
freq = frequencies.to_offset(freq)
offset = n * freq
result = self + offset
if hasattr(self, 'tz'):
result.tz = self.tz
return result
if n == 0:
# immutable so OK
return self
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + n * self.freq
end = self[-1] + n * self.freq
attribs = self._get_attributes_dict()
attribs['start'] = start
attribs['end'] = end
return type(self)(**attribs)
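    # shift semantics sketch (explanatory only): the index moves by whole
    # periods of its own freq rather than repositioning values, e.g.
    #
    #     pd.date_range('2018-01-01', periods=3, freq='D').shift(2)
    #     # -> DatetimeIndex(['2018-01-03', '2018-01-04', '2018-01-05'],
    #     #                  dtype='datetime64[ns]', freq='D')
    #
    # and shifting a freq-less index raises NullFrequencyError.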
def repeat(self, repeats, *args, **kwargs):
"""
Analogous to ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
if isinstance(self, ABCPeriodIndex):
freq = self.freq
else:
freq = None
return self._shallow_copy(self.asi8.repeat(repeats),
freq=freq)
@Appender(_index_shared_docs['where'] % _index_doc_kwargs)
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other)
values = _ensure_datetimelike_to_i8(self)
result = np.where(cond, values, other).astype('i8')
result = self._ensure_localized(result)
return self._shallow_copy(result,
**self._get_attributes_dict())
def summary(self, name=None):
"""
return a summarized representation
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = ', %s to %s' % (formatter(self[0]),
formatter(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (printing.pprint_thing(name),
len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
# display as values, not quoted
result = result.replace("'", "")
return result
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
attribs = self._get_attributes_dict()
attribs['name'] = name
if not isinstance(self, ABCPeriodIndex):
# reset freq
attribs['freq'] = None
if getattr(self, 'tz', None) is not None:
return _concat._concat_datetimetz(to_concat, name)
else:
new_data = np.concatenate([c.asi8 for c in to_concat])
return self._simple_new(new_data, **attribs)
def astype(self, dtype, copy=True):
if is_object_dtype(dtype):
return self._box_values_as_index()
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return Index(self.format(), name=self.name, dtype=object)
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), name=self.name,
dtype='i8')
elif (is_datetime_or_timedelta_dtype(dtype) and
not is_dtype_equal(self.dtype, dtype)) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
return super(DatetimeIndexOpsMixin, self).astype(dtype, copy=copy)
def _ensure_datetimelike_to_i8(other):
""" helper for coercing an input scalar or array to i8 """
if is_scalar(other) and isna(other):
other = iNaT
elif isinstance(other, ABCIndexClass):
# convert tz if needed
if getattr(other, 'tz', None) is not None:
other = other.tz_localize(None).asi8
else:
other = other.asi8
else:
try:
other = np.array(other, copy=False).view('i8')
except TypeError:
            # period array cannot be coerced to int
other = Index(other).asi8
return other
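# Coercion sketch for the helper above (explanatory only; ``dti`` and ``arr``
# are a hypothetical DatetimeIndex and datetime64 ndarray):
#
#     _ensure_datetimelike_to_i8(pd.NaT)   # -> iNaT (int64 sentinel)
#     _ensure_datetimelike_to_i8(dti)      # -> dti.asi8 (tz dropped first)
#     _ensure_datetimelike_to_i8(arr)      # -> int64 view of the ndarray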
| 32.848457
| 79
| 0.56148
|
import warnings
import operator
from datetime import datetime, timedelta
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.tools.timedeltas import to_timedelta
import numpy as np
from pandas._libs import lib, iNaT, NaT
from pandas._libs.tslibs.period import Period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import round_ns
from pandas.core.dtypes.common import (
_ensure_int64,
is_dtype_equal,
is_float,
is_integer,
is_list_like,
is_scalar,
is_bool_dtype,
is_offsetlike,
is_categorical_dtype,
is_datetime_or_timedelta_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_string_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
ABCIndex, ABCSeries, ABCDataFrame, ABCPeriodIndex, ABCIndexClass)
from pandas.core.dtypes.missing import isna
from pandas.core import common as com, algorithms, ops
from pandas.core.algorithms import checked_add_with_arr
from pandas.errors import NullFrequencyError, PerformanceWarning
import pandas.io.formats.printing as printing
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import Tick, DateOffset
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
class DatelikeOps(object):
def strftime(self, date_format):
return np.asarray(self.format(date_format=date_format),
dtype=compat.text_type)
strftime.__doc__ = """
Return an array of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format doc <{0}>`__
Parameters
----------
date_format : str
date format string (e.g. "%Y-%m-%d")
Returns
-------
ndarray of formatted strings
""".format("https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior")
class TimelikeOps(object):
_round_doc = (
"""
{op} the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
""")
_round_example = (
""">>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
""")
_floor_example = (
""">>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
)
_ceil_example = (
""">>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
)
def _round(self, freq, rounder):
values = _ensure_datetimelike_to_i8(self)
result = round_ns(values, rounder, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
attribs = self._get_attributes_dict()
if 'freq' in attribs:
attribs['freq'] = None
if 'tz' in attribs:
attribs['tz'] = None
return self._ensure_localized(
self._shallow_copy(result, **attribs))
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, *args, **kwargs):
return self._round(freq, np.round)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq):
return self._round(freq, np.floor)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq):
return self._round(freq, np.ceil)
class DatetimeIndexOpsMixin(object):
def equals(self, other):
if self.is_(other):
return True
if not isinstance(other, ABCIndexClass):
return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
except Exception:
return False
if not is_dtype_equal(self.dtype, other.dtype):
return False
elif isinstance(self, ABCPeriodIndex):
if not isinstance(other, ABCPeriodIndex):
return False
if self.freq != other.freq:
return False
return np.array_equal(self.asi8, other.asi8)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@staticmethod
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
def _evaluate_compare(self, other, op):
if not isinstance(other, type(self)):
if not is_list_like(other):
other = [other]
elif is_scalar(lib.item_from_zerodim(other)):
other = [other.item()]
other = type(self)(other)
result = op(self.asi8, other.asi8)
mask = (self._isnan) | (other._isnan)
if is_bool_dtype(result):
result[mask] = False
return result
result[mask] = iNaT
try:
return Index(result)
except TypeError:
return result
def _ensure_localized(self, result):
if getattr(self, 'tz', None) is not None:
if not isinstance(result, ABCIndexClass):
result = self._simple_new(result)
result = result.tz_localize(self.tz)
return result
@property
def _box_func(self):
raise com.AbstractMethodError(self)
def _box_values(self, values):
return lib.map_infer(values, self._box_func)
def _box_values_as_index(self):
from pandas.core.index import Index
return Index(self._box_values(self.asi8), name=self.name, dtype=object)
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
try:
res = self.get_loc(key)
return is_scalar(res) or type(res) == slice or np.any(res)
except (KeyError, TypeError, ValueError):
return False
contains = __contains__
def __getitem__(self, key):
is_int = is_integer(key)
if is_scalar(key) and not is_int:
raise IndexError("only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices")
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
return self._box_func(val)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
attribs = self._get_attributes_dict()
is_period = isinstance(self, ABCPeriodIndex)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
attribs['freq'] = freq
result = getitem(key)
if result.ndim > 1:
if is_period:
return self._simple_new(result, **attribs)
return result
return self._simple_new(result, **attribs)
@property
def freqstr(self):
if self.freq is None:
return None
return self.freq.freqstr
@cache_readonly
def inferred_freq(self):
try:
return frequencies.infer_freq(self)
except ValueError:
return None
def _nat_new(self, box=True):
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
if not box:
return result
attribs = self._get_attributes_dict()
if not is_period_dtype(self):
attribs['freq'] = None
return self._simple_new(result, **attribs)
def map(self, f):
try:
result = f(self)
if isinstance(result, np.ndarray):
result = Index(result)
if not isinstance(result, Index):
raise TypeError('The map function must return an Index object')
return result
except Exception:
return self.astype(object).map(f)
def sort_values(self, return_indexer=False, ascending=True):
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._ndarray_values)
attribs = self._get_attributes_dict()
freq = attribs['freq']
if freq is not None and not isinstance(self, ABCPeriodIndex):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs['freq'] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
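    # Illustrative note (assumed behaviour, not from this file): sorting a
    # regular-frequency index descending negates the freq rather than
    # dropping it, e.g. a daily index sorted with ascending=False ends up
    # with a -1-day offset, keeping start/end arithmetic consistent.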
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_int64(indices)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self._assert_take_fillable(self.asi8, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=iNaT)
freq = self.freq if isinstance(self, ABCPeriodIndex) else None
return self._shallow_copy(taken, freq=freq)
def get_duplicates(self):
values = Index.get_duplicates(self)
return self._simple_new(values)
_can_hold_na = True
_na_value = NaT
@cache_readonly
def _isnan(self):
return (self.asi8 == iNaT)
@property
def asobject(self):
warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
" instead", FutureWarning, stacklevel=2)
return self.astype(object)
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance, box=False))
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def _maybe_mask_results(self, result, fill_value=None, convert=None):
if self.hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def tolist(self):
return list(self.astype(object))
def min(self, axis=None, *args, **kwargs):
nv.validate_min(args, kwargs)
try:
i8 = self.asi8
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
min_stamp = self[~self._isnan].asi8.min()
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
def argmin(self, axis=None, *args, **kwargs):
nv.validate_argmin(args, kwargs)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all():
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
def max(self, axis=None, *args, **kwargs):
nv.validate_max(args, kwargs)
try:
i8 = self.asi8
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
max_stamp = self[~self._isnan].asi8.max()
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
def argmax(self, axis=None, *args, **kwargs):
nv.validate_argmax(args, kwargs)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all():
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
@property
def _formatter_func(self):
raise com.AbstractMethodError(self)
def _format_attrs(self):
attrs = super(DatetimeIndexOpsMixin, self)._format_attrs()
for attrib in self._attributes:
if attrib == 'freq':
freq = self.freqstr
if freq is not None:
freq = "'%s'" % freq
attrs.append(('freq', freq))
return attrs
@cache_readonly
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@cache_readonly
def resolution(self):
return frequencies.Resolution.get_str(self._resolution)
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return (super(DatetimeIndexOpsMixin, self)
._convert_scalar_indexer(key, kind=kind))
def _add_datelike(self, other):
raise TypeError("cannot add {cls} and {typ}"
.format(cls=type(self).__name__,
typ=type(other).__name__))
def _sub_datelike(self, other):
raise com.AbstractMethodError(self)
def _add_nat(self):
if is_period_dtype(self):
raise TypeError('Cannot add {cls} and {typ}'
.format(cls=type(self).__name__,
typ=type(NaT).__name__))
        return self._nat_new(box=True)
    def _sub_nat(self):
        # subtracting NaT yields an all-NaT result viewed as timedelta64
        result = self._nat_new(box=False)
        return result.view('timedelta64[ns]')
def _sub_period(self, other):
return NotImplemented
def _add_offset(self, offset):
raise com.AbstractMethodError(self)
def _addsub_offset_array(self, other, op):
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn("Adding/subtracting array of DateOffsets to "
"{cls} not vectorized"
.format(cls=type(self).__name__), PerformanceWarning)
res_values = op(self.astype('O').values, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs['freq'] = 'infer'
return self._constructor(res_values, **kwargs)
@classmethod
def _add_datetimelike_methods(cls):
def __add__(self, other):
from pandas import DateOffset
other = lib.item_from_zerodim(other)
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif other is NaT:
result = self._add_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(other)
elif isinstance(other, DateOffset):
result = self._add_offset(other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._add_datelike(other)
elif is_integer(other):
result = self.shift(other)
elif is_timedelta64_dtype(other):
result = self._add_delta(other)
elif is_offsetlike(other):
result = self._addsub_offset_array(other, operator.add)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
return self._add_datelike(other)
elif is_integer_dtype(other) and self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
elif is_float_dtype(other):
raise TypeError("cannot add {dtype}-dtype to {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
else:
return NotImplemented
if result is NotImplemented:
return NotImplemented
elif not isinstance(result, Index):
result = Index(result)
res_name = ops.get_op_result_name(self, other)
result.name = res_name
return result
cls.__add__ = __add__
def __radd__(self, other):
return self.__add__(other)
cls.__radd__ = __radd__
def __sub__(self, other):
from pandas import Index
other = lib.item_from_zerodim(other)
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif other is NaT:
result = self._sub_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(-other)
elif isinstance(other, DateOffset):
result = self._add_offset(-other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._sub_datelike(other)
elif is_integer(other):
result = self.shift(-other)
elif isinstance(other, Period):
result = self._sub_period(other)
elif is_timedelta64_dtype(other):
result = self._add_delta(-other)
elif is_offsetlike(other):
result = self._addsub_offset_array(other, operator.sub)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
result = self._sub_datelike(other)
elif isinstance(other, Index):
raise TypeError("cannot subtract {cls} and {typ}"
.format(cls=type(self).__name__,
typ=type(other).__name__))
elif is_integer_dtype(other) and self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
elif is_float_dtype(other):
raise TypeError("cannot subtract {dtype}-dtype from {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
else:
return NotImplemented
if result is NotImplemented:
return NotImplemented
elif not isinstance(result, Index):
result = Index(result)
res_name = ops.get_op_result_name(self, other)
result.name = res_name
return result
cls.__sub__ = __sub__
def __rsub__(self, other):
if is_datetime64_dtype(other) and is_timedelta64_dtype(self):
from pandas import DatetimeIndex
return DatetimeIndex(other) - self
return -(self - other)
cls.__rsub__ = __rsub__
def __iadd__(self, other):
return self.__add__(other)
cls.__iadd__ = __iadd__
def __isub__(self, other):
return self.__sub__(other)
cls.__isub__ = __isub__
def _add_delta(self, other):
return NotImplemented
def _add_delta_td(self, other):
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc,
arr_mask=self._isnan).view('i8')
if self.hasnans:
new_values[self._isnan] = iNaT
return new_values.view('i8')
def _add_delta_tdi(self, other):
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
arr_mask=self._isnan,
b_mask=other._isnan)
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('i8')
def isin(self, values):
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
def shift(self, n, freq=None):
if freq is not None and freq != self.freq:
if isinstance(freq, compat.string_types):
freq = frequencies.to_offset(freq)
offset = n * freq
result = self + offset
if hasattr(self, 'tz'):
result.tz = self.tz
return result
if n == 0:
return self
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + n * self.freq
end = self[-1] + n * self.freq
attribs = self._get_attributes_dict()
attribs['start'] = start
attribs['end'] = end
return type(self)(**attribs)
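    # Sketch of the regular-freq path above (dates assumed): with freq='D'
    # and n=2, both endpoints advance by 2 * freq, so an index spanning
    # 2018-01-01..2018-01-03 is rebuilt as 2018-01-03..2018-01-05.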
def repeat(self, repeats, *args, **kwargs):
nv.validate_repeat(args, kwargs)
if isinstance(self, ABCPeriodIndex):
freq = self.freq
else:
freq = None
return self._shallow_copy(self.asi8.repeat(repeats),
freq=freq)
@Appender(_index_shared_docs['where'] % _index_doc_kwargs)
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other)
values = _ensure_datetimelike_to_i8(self)
result = np.where(cond, values, other).astype('i8')
result = self._ensure_localized(result)
return self._shallow_copy(result,
**self._get_attributes_dict())
def summary(self, name=None):
formatter = self._formatter_func
if len(self) > 0:
index_summary = ', %s to %s' % (formatter(self[0]),
formatter(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (printing.pprint_thing(name),
len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
result = result.replace("'", "")
return result
def _concat_same_dtype(self, to_concat, name):
attribs = self._get_attributes_dict()
attribs['name'] = name
if not isinstance(self, ABCPeriodIndex):
# reset freq
attribs['freq'] = None
if getattr(self, 'tz', None) is not None:
return _concat._concat_datetimetz(to_concat, name)
else:
new_data = np.concatenate([c.asi8 for c in to_concat])
return self._simple_new(new_data, **attribs)
def astype(self, dtype, copy=True):
if is_object_dtype(dtype):
return self._box_values_as_index()
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return Index(self.format(), name=self.name, dtype=object)
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), name=self.name,
dtype='i8')
elif (is_datetime_or_timedelta_dtype(dtype) and
not is_dtype_equal(self.dtype, dtype)) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
return super(DatetimeIndexOpsMixin, self).astype(dtype, copy=copy)
def _ensure_datetimelike_to_i8(other):
if is_scalar(other) and isna(other):
other = iNaT
elif isinstance(other, ABCIndexClass):
# convert tz if needed
if getattr(other, 'tz', None) is not None:
other = other.tz_localize(None).asi8
else:
other = other.asi8
else:
try:
other = np.array(other, copy=False).view('i8')
except TypeError:
            # period array cannot be coerced to int
other = Index(other).asi8
return other
| true
| true
|
1c414889fae1a3c3b88f00ad952b2fd6174bbd2d
| 5,416
|
py
|
Python
|
test/unit/cypherpunkpay/bitcoin/pwuille/segwit_addr_test.py
|
RandyMcMillan/CypherpunkPay
|
997fc1e06865d6be75c46b719e673b2f50546f44
|
[
"MIT",
"Unlicense"
] | null | null | null |
test/unit/cypherpunkpay/bitcoin/pwuille/segwit_addr_test.py
|
RandyMcMillan/CypherpunkPay
|
997fc1e06865d6be75c46b719e673b2f50546f44
|
[
"MIT",
"Unlicense"
] | null | null | null |
test/unit/cypherpunkpay/bitcoin/pwuille/segwit_addr_test.py
|
RandyMcMillan/CypherpunkPay
|
997fc1e06865d6be75c46b719e673b2f50546f44
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Copyright (c) 2017 Pieter Wuille
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Reference tests for segwit adresses"""
import binascii
import unittest
from cypherpunkpay.bitcoin.pwuille import segwit_addr
def segwit_scriptpubkey(witver, witprog):
"""Construct a Segwit scriptPubKey for a given witness program."""
return bytes([witver + 0x50 if witver else 0, len(witprog)] + witprog)
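# Hedged sanity check against the first VALID_ADDRESS vector below: for
# witness version 0 and a 20-byte program, the script is OP_0 (0x00),
# then the push length (0x14), then the program bytes.
assert segwit_scriptpubkey(
    0, list(binascii.unhexlify('751e76e8199196d454941c45d1b3a323f1433bd6'))
) == binascii.unhexlify('0014751e76e8199196d454941c45d1b3a323f1433bd6')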
VALID_CHECKSUM = [
"A12UEL5L",
"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
]
INVALID_CHECKSUM = [
" 1nwldj5",
"\x7F" + "1axkwrx",
"an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx",
"pzry9x0s0muk",
"1pzry9x0s0muk",
"x1b4n0q5v",
"li1dgmt3",
"de1lg7wt\xff",
]
VALID_ADDRESS = [
["BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4", "0014751e76e8199196d454941c45d1b3a323f1433bd6"],
["tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
"00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262"],
["bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx",
"5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6"],
["BC1SW50QA3JX3S", "6002751e"],
["bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj", "5210751e76e8199196d454941c45d1b3a323"],
["tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy",
"0020000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433"],
]
INVALID_ADDRESS = [
"tc1qw508d6qejxtdg4y5r3zarvary0c5xw7kg3g4ty",
"bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5",
"BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2",
"bc1rw5uspcuh",
"bc10w508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw5rljs90",
"BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P",
"tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7",
"bc1zw508d6qejxtdg4y5r3zarvaryvqyzf3du",
"tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv",
"bc1gmk9yu",
]
INVALID_ADDRESS_ENC = [
("BC", 0, 20),
("bc", 0, 21),
("bc", 17, 32),
("bc", 1, 1),
("bc", 16, 41),
]
class TestSegwitAddress(unittest.TestCase):
"""Unit test class for segwit addressess."""
def test_valid_checksum(self):
"""Test checksum creation and validation."""
for test in VALID_CHECKSUM:
hrp, _ = segwit_addr.bech32_decode(test)
self.assertIsNotNone(hrp)
pos = test.rfind('1')
test = test[:pos+1] + chr(ord(test[pos + 1]) ^ 1) + test[pos+2:]
hrp, _ = segwit_addr.bech32_decode(test)
self.assertIsNone(hrp)
def test_invalid_checksum(self):
"""Test validation of invalid checksums."""
for test in INVALID_CHECKSUM:
hrp, _ = segwit_addr.bech32_decode(test)
self.assertIsNone(hrp)
def test_valid_address(self):
"""Test whether valid addresses decode to the correct output."""
for (address, hexscript) in VALID_ADDRESS:
hrp = "bc"
witver, witprog = segwit_addr.decode(hrp, address)
if witver is None:
hrp = "tb"
witver, witprog = segwit_addr.decode(hrp, address)
self.assertIsNotNone(witver)
scriptpubkey = segwit_scriptpubkey(witver, witprog)
self.assertEqual(scriptpubkey, binascii.unhexlify(hexscript))
addr = segwit_addr.encode(hrp, witver, witprog)
self.assertEqual(address.lower(), addr)
def test_invalid_address(self):
"""Test whether invalid addresses fail to decode."""
for test in INVALID_ADDRESS:
witver, _ = segwit_addr.decode("bc", test)
self.assertIsNone(witver)
witver, _ = segwit_addr.decode("tb", test)
self.assertIsNone(witver)
def test_invalid_address_enc(self):
"""Test whether address encoding fails on invalid input."""
for hrp, version, length in INVALID_ADDRESS_ENC:
code = segwit_addr.encode(hrp, version, [0] * length)
self.assertIsNone(code)
if __name__ == "__main__":
unittest.main()
| 39.246377
| 99
| 0.722304
|
import binascii
import unittest
from cypherpunkpay.bitcoin.pwuille import segwit_addr
def segwit_scriptpubkey(witver, witprog):
return bytes([witver + 0x50 if witver else 0, len(witprog)] + witprog)
VALID_CHECKSUM = [
"A12UEL5L",
"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
]
INVALID_CHECKSUM = [
" 1nwldj5",
"\x7F" + "1axkwrx",
"an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx",
"pzry9x0s0muk",
"1pzry9x0s0muk",
"x1b4n0q5v",
"li1dgmt3",
"de1lg7wt\xff",
]
VALID_ADDRESS = [
["BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4", "0014751e76e8199196d454941c45d1b3a323f1433bd6"],
["tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
"00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262"],
["bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx",
"5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6"],
["BC1SW50QA3JX3S", "6002751e"],
["bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj", "5210751e76e8199196d454941c45d1b3a323"],
["tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy",
"0020000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433"],
]
INVALID_ADDRESS = [
"tc1qw508d6qejxtdg4y5r3zarvary0c5xw7kg3g4ty",
"bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5",
"BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2",
"bc1rw5uspcuh",
"bc10w508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw5rljs90",
"BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P",
"tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7",
"bc1zw508d6qejxtdg4y5r3zarvaryvqyzf3du",
"tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv",
"bc1gmk9yu",
]
INVALID_ADDRESS_ENC = [
("BC", 0, 20),
("bc", 0, 21),
("bc", 17, 32),
("bc", 1, 1),
("bc", 16, 41),
]
class TestSegwitAddress(unittest.TestCase):
def test_valid_checksum(self):
for test in VALID_CHECKSUM:
hrp, _ = segwit_addr.bech32_decode(test)
self.assertIsNotNone(hrp)
pos = test.rfind('1')
test = test[:pos+1] + chr(ord(test[pos + 1]) ^ 1) + test[pos+2:]
hrp, _ = segwit_addr.bech32_decode(test)
self.assertIsNone(hrp)
def test_invalid_checksum(self):
for test in INVALID_CHECKSUM:
hrp, _ = segwit_addr.bech32_decode(test)
self.assertIsNone(hrp)
def test_valid_address(self):
for (address, hexscript) in VALID_ADDRESS:
hrp = "bc"
witver, witprog = segwit_addr.decode(hrp, address)
if witver is None:
hrp = "tb"
witver, witprog = segwit_addr.decode(hrp, address)
self.assertIsNotNone(witver)
scriptpubkey = segwit_scriptpubkey(witver, witprog)
self.assertEqual(scriptpubkey, binascii.unhexlify(hexscript))
addr = segwit_addr.encode(hrp, witver, witprog)
self.assertEqual(address.lower(), addr)
def test_invalid_address(self):
for test in INVALID_ADDRESS:
witver, _ = segwit_addr.decode("bc", test)
self.assertIsNone(witver)
witver, _ = segwit_addr.decode("tb", test)
self.assertIsNone(witver)
def test_invalid_address_enc(self):
for hrp, version, length in INVALID_ADDRESS_ENC:
code = segwit_addr.encode(hrp, version, [0] * length)
self.assertIsNone(code)
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c41492182a3eacb4f51a6f4a44790719a7da00d
| 2,062
|
py
|
Python
|
src/testcase.py
|
JeremyCCHsu/test-debug
|
88b3d153a4fa8d2b3eb905e9e3fd74bc9b5f9f55
|
[
"Apache-2.0"
] | null | null | null |
src/testcase.py
|
JeremyCCHsu/test-debug
|
88b3d153a4fa8d2b3eb905e9e3fd74bc9b5f9f55
|
[
"Apache-2.0"
] | null | null | null |
src/testcase.py
|
JeremyCCHsu/test-debug
|
88b3d153a4fa8d2b3eb905e9e3fd74bc9b5f9f55
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from wavegrad.params import params
from wavegrad.preprocess import transform
from wavegrad.inference import predict
import soundfile as sf
import torchaudio
# params = { 'noise_schedule': np.load('/path/to/noise_schedule.npy') }
from pathlib import Path
Fs = params.sample_rate
# model_dir = '../wavegrad-24kHz.pt'
model_dir = "wavegrad-16khz-libri.pt"
# device = "cuda" if torch.cuda.is_available() else "cpu"
filename = Path(
"/data/speech/LibriSpeech/train-clean-100/103/1240/103-1240-0005.flac"
# "/home/chincheh/datasets/BilingualNews/valid/bilin-both-01.wav"
# "/home/chincheh/datasets/VCTK-Corpus/wav48/p225/p225_002.wav"
# "/home/chincheh/datasets/VCTK-Corpus/wav48/p360/p360_057.wav"
# "/data/speech/LJSpeech-1.1/wavs/LJ050-0271.wav"
# "/home/chincheh/test/wav22k/p360_057.wav"
# "/home/chincheh/test/wavs/p360_057.wav"
# "/home/chincheh/datasets/Conan20/valid/Conan-Chelsea-Hilary.wav"
)
# p292/p292_079.wav")
# get your hands on a spectrogram in [N,C,W] format
spectrogram, wav = transform(filename)
# spectrogram
# sf.write(f"input-{Fs}.wav", wav[0].detach().cpu().numpy(), Fs)
stem = filename.stem
torchaudio.save(f"input-resample-{stem}-{Fs}.wav", wav, Fs)
# audio, fs = predict(spectrogram, model_dir, params=params)
severity = 1000
# audio, fs = predict(spectrogram, model_dir, params=params, audio=wav, severity=severity)
audio, fs = predict(spectrogram, model_dir, params=params)
torchaudio.save(f"output-resample-{stem}-{Fs}-denoised-{severity}.wav", audio[0].detach().cpu(), Fs)
# # ======================
# from_sox = np.load("/home/chincheh/test/wav22k/p360_057.wav.spec.npy")
# fig, ax = plt.subplots(3, 1)
# im = ax[0].imshow(from_sox, origin="lower")
# fig.colorbar(im, ax=ax[0])
# im = ax[1].imshow(spectrogram.numpy(), origin="lower")
# fig.colorbar(im, ax=ax[1])
# im = ax[2].imshow(from_sox - spectrogram.numpy(), origin="lower")
# fig.colorbar(im, ax=ax[2])
# fig.savefig("test.png")
# plt.close()
# # ======================
| 30.323529
| 100
| 0.694956
|
import numpy as np
import matplotlib.pyplot as plt
from wavegrad.params import params
from wavegrad.preprocess import transform
from wavegrad.inference import predict
import soundfile as sf
import torchaudio
from pathlib import Path
Fs = params.sample_rate
model_dir = "wavegrad-16khz-libri.pt"
filename = Path(
"/data/speech/LibriSpeech/train-clean-100/103/1240/103-1240-0005.flac"
)
# get your hands on a spectrogram in [N,C,W] format
spectrogram, wav = transform(filename)
# spectrogram
# sf.write(f"input-{Fs}.wav", wav[0].detach().cpu().numpy(), Fs)
stem = filename.stem
torchaudio.save(f"input-resample-{stem}-{Fs}.wav", wav, Fs)
# audio, fs = predict(spectrogram, model_dir, params=params)
severity = 1000
# audio, fs = predict(spectrogram, model_dir, params=params, audio=wav, severity=severity)
audio, fs = predict(spectrogram, model_dir, params=params)
torchaudio.save(f"output-resample-{stem}-{Fs}-denoised-{severity}.wav", audio[0].detach().cpu(), Fs)
# # ======================
# from_sox = np.load("/home/chincheh/test/wav22k/p360_057.wav.spec.npy")
# fig, ax = plt.subplots(3, 1)
# im = ax[0].imshow(from_sox, origin="lower")
# fig.colorbar(im, ax=ax[0])
# im = ax[1].imshow(spectrogram.numpy(), origin="lower")
# fig.colorbar(im, ax=ax[1])
# im = ax[2].imshow(from_sox - spectrogram.numpy(), origin="lower")
# fig.colorbar(im, ax=ax[2])
# fig.savefig("test.png")
# plt.close()
# # ======================
| true
| true
|
1c4149e6490ac6ab923b2339c90a9b498e267805
| 9,718
|
py
|
Python
|
src/detection/scripts/aggregate_predictions.py
|
anuragreddygv323/raster-vision
|
db2bc35f21968618a333cee2f5e86f29e7d56483
|
[
"Apache-2.0"
] | null | null | null |
src/detection/scripts/aggregate_predictions.py
|
anuragreddygv323/raster-vision
|
db2bc35f21968618a333cee2f5e86f29e7d56483
|
[
"Apache-2.0"
] | null | null | null |
src/detection/scripts/aggregate_predictions.py
|
anuragreddygv323/raster-vision
|
db2bc35f21968618a333cee2f5e86f29e7d56483
|
[
"Apache-2.0"
] | 1
|
2021-06-08T17:33:23.000Z
|
2021-06-08T17:33:23.000Z
|
import json
import argparse
from os import makedirs
from os.path import join, splitext
import numpy as np
from scipy.ndimage import imread
from scipy.misc import imsave
import matplotlib as mpl
mpl.use('Agg') # NOQA
import matplotlib.pyplot as plt
from cv2 import groupRectangles
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from utils import load_tiff
from settings import max_num_classes
def compute_agg_predictions(window_offsets, window_size, im_size, predictions):
''' Aggregate window predictions into predictions for original image. '''
boxes = []
scores = []
classes = []
file_names = sorted(predictions.keys())
for file_name in file_names:
preds = predictions[file_name]
x, y = window_offsets[file_name]
for box in preds['boxes']:
# box is (ymin, xmin, ymax, xmax) in relative coords
# (eg. 0.5 is middle of axis).
# x, y are in pixel offsets.
box = np.array(box) * window_size
box[0] += y # ymin
box[1] += x # xmin
box[2] += y # ymax
box[3] += x # xmax
box[0] /= im_size[1]
box[1] /= im_size[0]
box[2] /= im_size[1]
box[3] /= im_size[0]
box = np.clip(box, 0, 1).tolist()
boxes.append(box)
scores.extend(preds['scores'])
classes.extend([int(class_id) for class_id in preds['classes']])
return boxes, scores, classes
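# Worked example of the coordinate transform above (numbers assumed): with
# window_size=300 and a window at offset (x=600, y=300) in a 1200x900 image,
# a relative ymin of 0.5 maps to 0.5 * 300 + 300 = 450 px, which is then
# renormalized by the image height to 450 / 900 = 0.5.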
def plot_predictions(plot_path, im, category_index, boxes, scores, classes):
min_val = np.min(im)
max_val = np.max(im)
norm_im = 256 * ((im - min_val) / (max_val - min_val))
norm_im = norm_im.astype(np.uint8)
vis_util.visualize_boxes_and_labels_on_image_array(
norm_im,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=4)
imsave(plot_path, norm_im)
def box_to_cv2_rect(im_size, box):
ymin, xmin, ymax, xmax = box
width = xmax - xmin
height = ymax - ymin
xmin = int(xmin * im_size[0])
width = int(width * im_size[0])
ymin = int(ymin * im_size[1])
height = int(height * im_size[1])
rect = (xmin, ymin, width, height)
return rect
def cv2_rect_to_box(im_size, rect):
x, y, width, height = rect
x /= im_size[0]
width /= im_size[0]
y /= im_size[1]
height /= im_size[1]
box = [y, x, y + height, x + width]
return box
# From https://stackoverflow.com/questions/28723670/intersection-over-union-between-two-detections # noqa
def bb_intersection_over_union(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = (xB - xA + 1) * (yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
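# Quick illustrative check (not part of the original script): identical
# boxes overlap completely, so IoU is exactly 1.0. Note this version does
# not clamp interArea at zero, so fully disjoint boxes can yield a
# negative value.
assert bb_intersection_over_union([0, 0, 10, 10], [0, 0, 10, 10]) == 1.0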
def rect_to_bbox(rect):
x, y, width, height = rect
return [x, y, x + width, y + height]
def group_boxes(boxes, scores, im_size):
'''Group boxes belonging to a single class.'''
# Convert boxes to opencv rectangles
rects = []
for box_ind in range(boxes.shape[0]):
box = boxes[box_ind, :].tolist()
rect = box_to_cv2_rect(im_size, box)
rects.append(rect)
# Add last rect again to ensure that there are at least two rectangles
# which seems to be required by groupRectangles.
rects.append(rect)
# Group the rects
group_threshold = 1
# May need to tune this parameter for other datasets depending on size
# of detected objects.
eps = 0.5
grouped_rects = groupRectangles(rects, group_threshold, eps)[0]
grouped_boxes = []
grouped_scores = []
# Find the rects and corresponding scores that best match the grouped_rects
for grouped_rect in grouped_rects:
bbox1 = rect_to_bbox(grouped_rect)
best_iou = 0.0
best_ind = None
for rect_ind, rect in enumerate(rects[:-1]):
bbox2 = rect_to_bbox(rect)
iou = bb_intersection_over_union(bbox1, bbox2)
if iou > best_iou:
best_iou = iou
best_ind = rect_ind
grouped_boxes.append(cv2_rect_to_box(im_size, rects[best_ind]))
grouped_scores.append(scores[best_ind])
return grouped_boxes, grouped_scores
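# Behavioural note (OpenCV semantics, stated here as an assumption): with
# group_threshold=1, groupRectangles rejects clusters of fewer than two
# rectangles and merges members within relative tolerance eps, which is
# why the last rect is appended twice above.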
def group_predictions(boxes, classes, scores, im_size):
'''For each class, group boxes that are overlapping.'''
unique_classes = list(set(classes))
boxes = np.array(boxes)
classes = np.array(classes)
scores = np.array(scores)
grouped_boxes = []
grouped_classes = []
grouped_scores = []
for class_id in unique_classes:
class_boxes = boxes[classes == class_id]
class_scores = scores[classes == class_id]
class_grouped_boxes, class_grouped_scores = \
group_boxes(class_boxes, class_scores, im_size)
grouped_boxes.extend(class_grouped_boxes)
grouped_classes.extend([class_id] * len(class_grouped_boxes))
grouped_scores.extend(class_grouped_scores)
return grouped_boxes, grouped_classes, grouped_scores
def save_geojson(path, boxes, classes, scores, im_size, category_index,
image_dataset=None):
polygons = []
for box in boxes:
x, y, width, height = box_to_cv2_rect(im_size, box)
nw = (x, y)
ne = (x + width, y)
se = (x + width, y + height)
sw = (x, y + height)
polygon = [nw, ne, se, sw, nw]
# Transform from pixel coords to spatial coords
if image_dataset:
polygon = [image_dataset.ul(point[1], point[0])
for point in polygon]
polygons.append(polygon)
crs = None
if image_dataset:
# XXX not sure if I'm getting this properly
crs_name = image_dataset.crs['init']
crs = {
'type': 'name',
'properties': {
'name': crs_name
}
}
features = [{
'type': 'Feature',
'properties': {
'class_id': int(class_id),
'class_name': category_index[class_id]['name'],
'score': score
},
'geometry': {
'type': 'Polygon',
'coordinates': [polygon]
}
}
for polygon, class_id, score in zip(polygons, classes, scores)
]
geojson = {
'type': 'FeatureCollection',
'crs': crs,
'features': features
}
with open(path, 'w') as json_file:
json.dump(geojson, json_file, indent=4)
def aggregate_predictions(image_path, window_info_path, predictions_path,
label_map_path, output_dir):
print('Aggregating predictions over windows...')
label_map = label_map_util.load_labelmap(label_map_path)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=max_num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
image_dataset = None
if splitext(image_path)[1] == '.tif':
im, image_dataset = load_tiff(image_path)
else:
im = imread(image_path)
im_size = [im.shape[1], im.shape[0]]
with open(window_info_path) as window_info_file:
window_info = json.load(window_info_file)
window_offsets = window_info['offsets']
window_size = window_info['window_size']
with open(predictions_path) as predictions_file:
predictions = json.load(predictions_file)
makedirs(output_dir, exist_ok=True)
boxes, scores, classes = compute_agg_predictions(
window_offsets, window_size, im_size, predictions)
# Due to the sliding window approach, sometimes there are multiple
# slightly different detections where there should only be one. So
# we group them together.
# boxes, classes, scores = group_predictions(boxes, classes, scores, im_size)
agg_predictions_path = join(output_dir, 'predictions.geojson')
save_geojson(agg_predictions_path, boxes, classes, scores, im_size,
category_index, image_dataset=image_dataset)
plot_path = join(output_dir, 'predictions.jpg')
plot_predictions(plot_path, im, category_index, boxes, scores, classes)
def parse_args():
description = """
Aggregate predictions from windows into predictions over original
image. The output is GeoJSON in the CRS of the input image.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--image-path')
parser.add_argument('--window-info-path')
parser.add_argument('--predictions-path')
parser.add_argument('--label-map-path')
parser.add_argument('--output-dir')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(args)
aggregate_predictions(
args.image_path, args.window_info_path, args.predictions_path,
args.label_map_path, args.output_dir)
| 30.949045
| 105
| 0.63758
|
import json
import argparse
from os import makedirs
from os.path import join, splitext
import numpy as np
from scipy.ndimage import imread
from scipy.misc import imsave
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from cv2 import groupRectangles
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from utils import load_tiff
from settings import max_num_classes
def compute_agg_predictions(window_offsets, window_size, im_size, predictions):
boxes = []
scores = []
classes = []
file_names = sorted(predictions.keys())
for file_name in file_names:
preds = predictions[file_name]
x, y = window_offsets[file_name]
for box in preds['boxes']:
box = np.array(box) * window_size
box[0] += y
box[1] += x
box[2] += y
box[3] += x
box[0] /= im_size[1]
box[1] /= im_size[0]
box[2] /= im_size[1]
box[3] /= im_size[0]
box = np.clip(box, 0, 1).tolist()
boxes.append(box)
scores.extend(preds['scores'])
classes.extend([int(class_id) for class_id in preds['classes']])
return boxes, scores, classes
def plot_predictions(plot_path, im, category_index, boxes, scores, classes):
min_val = np.min(im)
max_val = np.max(im)
norm_im = 256 * ((im - min_val) / (max_val - min_val))
norm_im = norm_im.astype(np.uint8)
vis_util.visualize_boxes_and_labels_on_image_array(
norm_im,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=4)
imsave(plot_path, norm_im)
def box_to_cv2_rect(im_size, box):
ymin, xmin, ymax, xmax = box
width = xmax - xmin
height = ymax - ymin
xmin = int(xmin * im_size[0])
width = int(width * im_size[0])
ymin = int(ymin * im_size[1])
height = int(height * im_size[1])
rect = (xmin, ymin, width, height)
return rect
def cv2_rect_to_box(im_size, rect):
x, y, width, height = rect
x /= im_size[0]
width /= im_size[0]
y /= im_size[1]
height /= im_size[1]
box = [y, x, y + height, x + width]
return box
def bb_intersection_over_union(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = (xB - xA + 1) * (yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
def rect_to_bbox(rect):
x, y, width, height = rect
return [x, y, x + width, y + height]
def group_boxes(boxes, scores, im_size):
rects = []
for box_ind in range(boxes.shape[0]):
box = boxes[box_ind, :].tolist()
rect = box_to_cv2_rect(im_size, box)
rects.append(rect)
rects.append(rect)
group_threshold = 1
eps = 0.5
grouped_rects = groupRectangles(rects, group_threshold, eps)[0]
grouped_boxes = []
grouped_scores = []
for grouped_rect in grouped_rects:
bbox1 = rect_to_bbox(grouped_rect)
best_iou = 0.0
best_ind = None
for rect_ind, rect in enumerate(rects[:-1]):
bbox2 = rect_to_bbox(rect)
iou = bb_intersection_over_union(bbox1, bbox2)
if iou > best_iou:
best_iou = iou
best_ind = rect_ind
grouped_boxes.append(cv2_rect_to_box(im_size, rects[best_ind]))
grouped_scores.append(scores[best_ind])
return grouped_boxes, grouped_scores
def group_predictions(boxes, classes, scores, im_size):
unique_classes = list(set(classes))
boxes = np.array(boxes)
classes = np.array(classes)
scores = np.array(scores)
grouped_boxes = []
grouped_classes = []
grouped_scores = []
for class_id in unique_classes:
class_boxes = boxes[classes == class_id]
class_scores = scores[classes == class_id]
class_grouped_boxes, class_grouped_scores = \
group_boxes(class_boxes, class_scores, im_size)
grouped_boxes.extend(class_grouped_boxes)
grouped_classes.extend([class_id] * len(class_grouped_boxes))
grouped_scores.extend(class_grouped_scores)
return grouped_boxes, grouped_classes, grouped_scores
def save_geojson(path, boxes, classes, scores, im_size, category_index,
image_dataset=None):
polygons = []
for box in boxes:
x, y, width, height = box_to_cv2_rect(im_size, box)
nw = (x, y)
ne = (x + width, y)
se = (x + width, y + height)
sw = (x, y + height)
polygon = [nw, ne, se, sw, nw]
if image_dataset:
polygon = [image_dataset.ul(point[1], point[0])
for point in polygon]
polygons.append(polygon)
crs = None
if image_dataset:
crs_name = image_dataset.crs['init']
crs = {
'type': 'name',
'properties': {
'name': crs_name
}
}
features = [{
'type': 'Feature',
'properties': {
'class_id': int(class_id),
'class_name': category_index[class_id]['name'],
'score': score
},
'geometry': {
'type': 'Polygon',
'coordinates': [polygon]
}
}
for polygon, class_id, score in zip(polygons, classes, scores)
]
geojson = {
'type': 'FeatureCollection',
'crs': crs,
'features': features
}
with open(path, 'w') as json_file:
json.dump(geojson, json_file, indent=4)
def aggregate_predictions(image_path, window_info_path, predictions_path,
label_map_path, output_dir):
print('Aggregating predictions over windows...')
label_map = label_map_util.load_labelmap(label_map_path)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=max_num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
image_dataset = None
if splitext(image_path)[1] == '.tif':
im, image_dataset = load_tiff(image_path)
else:
im = imread(image_path)
im_size = [im.shape[1], im.shape[0]]
with open(window_info_path) as window_info_file:
window_info = json.load(window_info_file)
window_offsets = window_info['offsets']
window_size = window_info['window_size']
with open(predictions_path) as predictions_file:
predictions = json.load(predictions_file)
makedirs(output_dir, exist_ok=True)
boxes, scores, classes = compute_agg_predictions(
window_offsets, window_size, im_size, predictions)
# Due to the sliding window approach, sometimes there are multiple
# slightly different detections where there should only be one. So
# we group them together.
# boxes, classes, scores = group_predictions(boxes, classes, scores, im_size)
agg_predictions_path = join(output_dir, 'predictions.geojson')
save_geojson(agg_predictions_path, boxes, classes, scores, im_size,
category_index, image_dataset=image_dataset)
plot_path = join(output_dir, 'predictions.jpg')
plot_predictions(plot_path, im, category_index, boxes, scores, classes)
def parse_args():
description = """
Aggregate predictions from windows into predictions over original
image. The output is GeoJSON in the CRS of the input image.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--image-path')
parser.add_argument('--window-info-path')
parser.add_argument('--predictions-path')
parser.add_argument('--label-map-path')
parser.add_argument('--output-dir')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(args)
aggregate_predictions(
args.image_path, args.window_info_path, args.predictions_path,
args.label_map_path, args.output_dir)
| true
| true
|
1c414a4addd8bde6c44e18dfb7a96743e832ec1b
| 384
|
py
|
Python
|
day6/d6p2.py
|
francois07/advent-of-code-2020
|
7e0857406520a7cd2b7a6394639c6e69e357a34a
|
[
"MIT"
] | null | null | null |
day6/d6p2.py
|
francois07/advent-of-code-2020
|
7e0857406520a7cd2b7a6394639c6e69e357a34a
|
[
"MIT"
] | null | null | null |
day6/d6p2.py
|
francois07/advent-of-code-2020
|
7e0857406520a7cd2b7a6394639c6e69e357a34a
|
[
"MIT"
] | null | null | null |
input = [x for x in open('input.txt', 'r').read().strip().split("\n\n")]
def get_common_answers(data):
answers = set()
persons = data.splitlines()
for person in persons:
for answer in person:
if data.count(answer) == len(persons):
answers.add(answer)
return len(answers)
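# Illustrative check (made-up group): in "ab\nac" only 'a' appears on every
# person's line, so the count of common answers is 1.
assert get_common_answers("ab\nac") == 1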
n = sum([get_common_answers(x) for x in input])
print(n)
| 24
| 72
| 0.601563
|
input = [x for x in open('input.txt', 'r').read().strip().split("\n\n")]
def get_common_answers(data):
answers = set()
persons = data.splitlines()
for person in persons:
for answer in person:
if data.count(answer) == len(persons):
answers.add(answer)
return len(answers)
n = sum([get_common_answers(x) for x in input])
print(n)
| true
| true
|
1c414a664c656b48812aa2ed17578881e93d5cdc
| 3,573
|
py
|
Python
|
asposepdfcloud/models/raster_images_saving_modes.py
|
kaferi/aspose-pdf-cloud-python
|
48f70742fec1e41644ec0b658db3f174ba845304
|
[
"MIT"
] | 7
|
2018-06-11T17:44:44.000Z
|
2022-02-08T05:52:48.000Z
|
asposepdfcloud/models/raster_images_saving_modes.py
|
kaferi/aspose-pdf-cloud-python
|
48f70742fec1e41644ec0b658db3f174ba845304
|
[
"MIT"
] | 1
|
2021-03-20T22:16:15.000Z
|
2021-06-27T15:11:52.000Z
|
asposepdfcloud/models/raster_images_saving_modes.py
|
kaferi/aspose-pdf-cloud-python
|
48f70742fec1e41644ec0b658db3f174ba845304
|
[
"MIT"
] | 4
|
2018-04-18T19:41:12.000Z
|
2021-06-21T13:12:24.000Z
|
# coding: utf-8
"""
Aspose.PDF Cloud API Reference
Copyright (c) 2021 Aspose.PDF Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
OpenAPI spec version: 3.0
"""
from pprint import pformat
from six import iteritems
import re
class RasterImagesSavingModes(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
ASPNGIMAGESEMBEDDEDINTOSVG = "AsPngImagesEmbeddedIntoSvg"
ASEXTERNALPNGFILESREFERENCEDVIASVG = "AsExternalPngFilesReferencedViaSvg"
ASEMBEDDEDPARTSOFPNGPAGEBACKGROUND = "AsEmbeddedPartsOfPngPageBackground"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
RasterImagesSavingModes - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RasterImagesSavingModes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.04878
| 78
| 0.620767
|
from pprint import pformat
from six import iteritems
import re
class RasterImagesSavingModes(object):
ASPNGIMAGESEMBEDDEDINTOSVG = "AsPngImagesEmbeddedIntoSvg"
ASEXTERNALPNGFILESREFERENCEDVIASVG = "AsExternalPngFilesReferencedViaSvg"
ASEMBEDDEDPARTSOFPNGPAGEBACKGROUND = "AsEmbeddedPartsOfPngPageBackground"
swagger_types = {
}
attribute_map = {
}
def __init__(self):
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, RasterImagesSavingModes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c414bb40d653e292e566df382e6f807b691cba3
| 10,268
|
py
|
Python
|
pyfda/filter_designs/cheby1.py
|
toddrme2178/pyfda
|
c20355fb36ace6902aebd1a6bc6c1a71771b84f4
|
[
"MIT"
] | 1
|
2021-05-17T21:27:39.000Z
|
2021-05-17T21:27:39.000Z
|
pyfda/filter_designs/cheby1.py
|
toddrme2178/pyfda
|
c20355fb36ace6902aebd1a6bc6c1a71771b84f4
|
[
"MIT"
] | null | null | null |
pyfda/filter_designs/cheby1.py
|
toddrme2178/pyfda
|
c20355fb36ace6902aebd1a6bc6c1a71771b84f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of the pyFDA project hosted at https://github.com/chipmuenk/pyfda
#
# Copyright © pyFDA Project Contributors
# Licensed under the terms of the MIT License
# (see file LICENSE in root directory for details)
"""
Design Chebychev 1 filters (LP, HP, BP, BS) with fixed or minimum order, return
the filter design in zpk (zeros, poles, gain) or second-order sections (sos) format.
Attention:
This class is re-instantiated dynamically every time the filter design method
is selected, calling its __init__ method.
API version info:
1.0: initial working release
1.1: - copy A_PB -> A_PB2 and A_SB -> A_SB2 for BS / BP designs
- mark private methods as private
1.2: new API using fil_save (enable SOS features when available)
1.3: new public methods destruct_UI + construct_UI (no longer called by __init__)
1.4: module attribute `filter_classes` contains class name and combo box name
instead of class attribute `name`
`FRMT` is now a class attribute
2.0: Specify the parameters for each subwidget as tuples in a dict where the
first element controls whether the widget is visible and / or enabled.
This dict is now called self.rt_dict. When present, the dict self.rt_dict_add
is read and merged with the first one.
2.1: Remove empty methods construct_UI and destruct_UI and attributes
self.wdg and self.hdl
2.2: Rename `filter_classes` -> `classes`, remove Py2 compatibility
"""
import scipy.signal as sig
from scipy.signal import cheb1ord
from pyfda.pyfda_lib import fil_save, SOS_AVAIL, lin2unit
from pyfda.pyfda_qt_lib import qfilter_warning
from .common import Common
__version__ = "2.2"
classes = {'Cheby1':'Chebychev 1'} #: Dict containing class name : display name
class Cheby1(object):
if SOS_AVAIL:
FRMT = 'sos' # output format of filter design routines 'zpk' / 'ba' / 'sos'
else:
FRMT = 'zpk'
def __init__(self):
self.ft = 'IIR'
c = Common()
self.rt_dict = c.rt_base_iir
self.rt_dict_add = {
'COM':{'man':{'msg':('a',
r"Enter the filter order <b><i>N</i></b> and the critical frequency "
"or frequencies <b><i>F<sub>C</sub></i></b> where the gain first drops below "
"the maximum ripple "
"<b><i>-A<sub>PB</sub></i></b> allowed below unity gain in the "
"passband.")},
},
'LP': {'man':{}, 'min':{}},
'HP': {'man':{}, 'min':{}},
'BS': {'man':{}, 'min':{}},
'BP': {'man':{}, 'min':{}},
}
self.info = """
**Chebychev Type 1 filters**
maximize the rate of cutoff between the frequency response’s passband and stopband,
at the expense of passband ripple :math:`A_PB` and increased ringing in
the step response. The stopband drops monotonously.
Type I filters roll off faster than Type II, but Type II filters do not
have any ripple in the passband.
The passband has a constant ripple (equiripple) with a total of :math:`N` maxima
and minima (for example, a 5th-order filter has 3 maxima and 2 minima). Consequently,
the DC gain is unity for odd-order low-pass filters, and :math:`-A_PB` dB for even-order filters.
For a manual filter design, the order :math:`N`, the passband ripple :math:`A_PB` and
the critical frequency / frequencies :math:`F_C` where the gain drops below
:math:`-A_PB` have to be specified.
The ``cheb1ord()`` helper routine calculates the minimum order :math:`N` and the
critical passband frequency :math:`F_C` from passband / stopband specifications.
**Design routines:**
``scipy.signal.cheby1()``, ``scipy.signal.cheb1ord()``
"""
self.info_doc = []
self.info_doc.append('cheby1()\n========')
self.info_doc.append(sig.cheby1.__doc__)
self.info_doc.append('cheb1ord()\n==========')
self.info_doc.append(sig.cheb1ord.__doc__)
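        # Hedged sketch of the min-order helper described in self.info above
        # (spec values assumed, not taken from this module):
        #   N, F_PBC = cheb1ord(wp=0.2, ws=0.3, gpass=1, gstop=40)
        # returns the smallest order N and critical frequency F_PBC that
        # sig.cheby1(N, 1, F_PBC, btype='low') needs to satisfy the specs.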
#--------------------------------------------------------------------------
def _get_params(self, fil_dict):
"""
Translate parameters from filter dictionary to instance
parameters, scaling / transforming them if needed.
"""
self.analog = False # set to True for analog filters
self.N = fil_dict['N']
# Frequencies are normalized to f_Nyq = f_S/2, ripple specs are in dB
self.F_PB = fil_dict['F_PB'] * 2
self.F_SB = fil_dict['F_SB'] * 2
self.F_C = fil_dict['F_C'] * 2
self.F_PB2 = fil_dict['F_PB2'] * 2
self.F_SB2 = fil_dict['F_SB2'] * 2
self.F_C2 = fil_dict['F_C2'] * 2
self.F_PBC = None
self.A_PB = lin2unit(fil_dict['A_PB'], 'IIR', 'A_PB', unit='dB')
self.A_SB = lin2unit(fil_dict['A_SB'], 'IIR', 'A_SB', unit='dB')
# cheby1 filter routines support only one amplitude spec for
# pass- and stop band each
if str(fil_dict['rt']) == 'BS':
fil_dict['A_PB2'] = fil_dict['A_PB']
elif str(fil_dict['rt']) == 'BP':
fil_dict['A_SB2'] = fil_dict['A_SB']
def _test_N(self):
"""
Warn the user if the calculated order is too high for a reasonable filter
design.
"""
if self.N > 30:
return qfilter_warning(None, self.N, "Chebychev 1")
else:
return True
def _save(self, fil_dict, arg):
"""
Convert results of filter design to all available formats (pz, ba, sos)
and store them in the global filter dictionary.
Corner frequencies and order calculated for minimum filter order are
also stored to allow for an easy subsequent manual filter optimization.
"""
fil_save(fil_dict, arg, self.FRMT, __name__)
# For min. filter order algorithms, update filter dictionary with calculated
# new values for filter order N and corner frequency(s) F_PBC
if str(fil_dict['fo']) == 'min':
fil_dict['N'] = self.N
if str(fil_dict['rt']) == 'LP' or str(fil_dict['rt']) == 'HP':
fil_dict['F_C'] = self.F_PBC / 2. # HP or LP - single corner frequency
else: # BP or BS - two corner frequencies
fil_dict['F_C'] = self.F_PBC[0] / 2.
fil_dict['F_C2'] = self.F_PBC[1] / 2.
#------------------------------------------------------------------------------
#
# DESIGN ROUTINES
#
#------------------------------------------------------------------------------
# LP: F_PB < F_SB ---------------------------------------------------------
def LPmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord(self.F_PB,self.F_SB, self.A_PB,self.A_SB,
analog=self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='low', analog=self.analog, output=self.FRMT))
def LPman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_C,
btype='low', analog=self.analog, output=self.FRMT))
# HP: F_SB < F_PB ---------------------------------------------------------
def HPmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord(self.F_PB,self.F_SB, self.A_PB,self.A_SB,
analog=self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='highpass', analog=self.analog, output=self.FRMT))
def HPman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_C,
btype='highpass', analog=self.analog, output=self.FRMT))
# For BP and BS, A_PB, F_PB and F_stop have two elements each:
# BP: F_SB[0] < F_PB[0], F_SB[1] > F_PB[1] --------------------------------
def BPmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord([self.F_PB, self.F_PB2],
[self.F_SB, self.F_SB2], self.A_PB,self.A_SB, analog=self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='bandpass', analog=self.analog, output=self.FRMT))
def BPman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB,[self.F_C,self.F_C2],
btype='bandpass', analog=self.analog, output=self.FRMT))
# BS: F_SB[0] > F_PB[0], F_SB[1] < F_PB[1] --------------------------------
def BSmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord([self.F_PB, self.F_PB2],
[self.F_SB, self.F_SB2], self.A_PB,self.A_SB, analog = self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='bandstop', analog=self.analog, output=self.FRMT))
def BSman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, [self.F_C,self.F_C2],
btype='bandstop', analog=self.analog, output=self.FRMT))
#------------------------------------------------------------------------------
if __name__ == '__main__':
filt = Cheby1() # instantiate filter
import pyfda.filterbroker as fb # importing filterbroker initializes all its globals
filt.LPman(fb.fil[0]) # design a low-pass with parameters from global dict
print(fb.fil[0][filt.FRMT]) # return results in default format
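    # A minimal cross-check using scipy directly; the spec values below are
    # made-up examples, not pyfda defaults. cheb1ord() returns the minimum
    # order N and the passband-edge frequency (normalized to Nyquist), which
    # feed cheby1() exactly as in LPmin() above.
    N, F_PBC = cheb1ord(0.2, 0.3, 1, 40)  # wp, ws, A_PB [dB], A_SB [dB]
    sos = sig.cheby1(N, 1, F_PBC, btype='low', output='sos')
    print(N, F_PBC)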
| 40.908367
| 101
| 0.571289
|
import scipy.signal as sig
from scipy.signal import cheb1ord
from pyfda.pyfda_lib import fil_save, SOS_AVAIL, lin2unit
from pyfda.pyfda_qt_lib import qfilter_warning
from .common import Common
__version__ = "2.2"
classes = {'Cheby1':'Chebychev 1'}
class Cheby1(object):
if SOS_AVAIL:
FRMT = 'sos'
else:
FRMT = 'zpk'
def __init__(self):
self.ft = 'IIR'
c = Common()
self.rt_dict = c.rt_base_iir
self.rt_dict_add = {
'COM':{'man':{'msg':('a',
r"Enter the filter order <b><i>N</i></b> and the critical frequency "
"or frequencies <b><i>F<sub>C</sub></i></b> where the gain first drops below "
"the maximum ripple "
"<b><i>-A<sub>PB</sub></i></b> allowed below unity gain in the "
"passband.")},
},
'LP': {'man':{}, 'min':{}},
'HP': {'man':{}, 'min':{}},
'BS': {'man':{}, 'min':{}},
'BP': {'man':{}, 'min':{}},
}
self.info = """
**Chebychev Type 1 filters**
maximize the rate of cutoff between the frequency response’s passband and stopband,
at the expense of passband ripple :math:`A_PB` and increased ringing in
the step response. The stopband response decreases monotonically.
Type I filters roll off faster than Type II, but Type II filters do not
have any ripple in the passband.
The passband has a constant ripple (equiripple) with a total of :math:`N` maxima
and minima (for example, a 5th-order filter has 3 maxima and 2 minima). Consequently,
the DC gain is unity for odd-order low-pass filters, and :math:`-A_PB` dB for even-order filters.
For a manual filter design, the order :math:`N`, the passband ripple :math:`A_PB` and
the critical frequency / frequencies :math:`F_C` where the gain drops below
:math:`-A_PB` have to be specified.
The ``cheb1ord()`` helper routine calculates the minimum order :math:`N` and the
critical passband frequency :math:`F_C` from passband / stopband specifications.
**Design routines:**
``scipy.signal.cheby1()``, ``scipy.signal.cheb1ord()``
"""
self.info_doc = []
self.info_doc.append('cheby1()\n========')
self.info_doc.append(sig.cheby1.__doc__)
self.info_doc.append('cheb1ord()\n==========')
self.info_doc.append(sig.cheb1ord.__doc__)
def _get_params(self, fil_dict):
self.analog = False
self.N = fil_dict['N']
self.F_PB = fil_dict['F_PB'] * 2
self.F_SB = fil_dict['F_SB'] * 2
self.F_C = fil_dict['F_C'] * 2
self.F_PB2 = fil_dict['F_PB2'] * 2
self.F_SB2 = fil_dict['F_SB2'] * 2
self.F_C2 = fil_dict['F_C2'] * 2
self.F_PBC = None
self.A_PB = lin2unit(fil_dict['A_PB'], 'IIR', 'A_PB', unit='dB')
self.A_SB = lin2unit(fil_dict['A_SB'], 'IIR', 'A_SB', unit='dB')
if str(fil_dict['rt']) == 'BS':
fil_dict['A_PB2'] = fil_dict['A_PB']
elif str(fil_dict['rt']) == 'BP':
fil_dict['A_SB2'] = fil_dict['A_SB']
def _test_N(self):
if self.N > 30:
return qfilter_warning(None, self.N, "Chebychev 1")
else:
return True
def _save(self, fil_dict, arg):
fil_save(fil_dict, arg, self.FRMT, __name__)
if str(fil_dict['fo']) == 'min':
fil_dict['N'] = self.N
if str(fil_dict['rt']) == 'LP' or str(fil_dict['rt']) == 'HP':
fil_dict['F_C'] = self.F_PBC / 2.
else:
fil_dict['F_C'] = self.F_PBC[0] / 2.
fil_dict['F_C2'] = self.F_PBC[1] / 2.
def LPmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord(self.F_PB,self.F_SB, self.A_PB,self.A_SB,
analog=self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='low', analog=self.analog, output=self.FRMT))
def LPman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_C,
btype='low', analog=self.analog, output=self.FRMT))
def HPmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord(self.F_PB,self.F_SB, self.A_PB,self.A_SB,
analog=self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='highpass', analog=self.analog, output=self.FRMT))
def HPman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_C,
btype='highpass', analog=self.analog, output=self.FRMT))
def BPmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord([self.F_PB, self.F_PB2],
[self.F_SB, self.F_SB2], self.A_PB,self.A_SB, analog=self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='bandpass', analog=self.analog, output=self.FRMT))
def BPman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB,[self.F_C,self.F_C2],
btype='bandpass', analog=self.analog, output=self.FRMT))
def BSmin(self, fil_dict):
self._get_params(fil_dict)
self.N, self.F_PBC = cheb1ord([self.F_PB, self.F_PB2],
[self.F_SB, self.F_SB2], self.A_PB,self.A_SB, analog = self.analog)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, self.F_PBC,
btype='bandstop', analog=self.analog, output=self.FRMT))
def BSman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
self._save(fil_dict, sig.cheby1(self.N, self.A_PB, [self.F_C,self.F_C2],
btype='bandstop', analog=self.analog, output=self.FRMT))
if __name__ == '__main__':
filt = Cheby1()
import pyfda.filterbroker as fb
filt.LPman(fb.fil[0])
print(fb.fil[0][filt.FRMT])
| true
| true
|
1c414ded33c00441a31fb53a36c808e64b51b991
| 18,195
|
py
|
Python
|
jsonpickle/pickler.py
|
juggernautbooks/jsonpickle
|
e819193cfac6cc2c30bf90b2ccfc92f1e6d967d9
|
[
"BSD-3-Clause"
] | null | null | null |
jsonpickle/pickler.py
|
juggernautbooks/jsonpickle
|
e819193cfac6cc2c30bf90b2ccfc92f1e6d967d9
|
[
"BSD-3-Clause"
] | null | null | null |
jsonpickle/pickler.py
|
juggernautbooks/jsonpickle
|
e819193cfac6cc2c30bf90b2ccfc92f1e6d967d9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009, 2011, 2013 David Aguilar (davvid -at- gmail.com) and contributors
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import base64
import warnings
import sys
from itertools import chain, islice
from . import util
from . import tags
from . import handlers
from .backend import JSONBackend
from .compat import numeric_types, unicode, PY3, PY2
def encode(value,
unpicklable=False,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
numeric_keys=False):
backend = _make_backend(backend)
if context is None:
context = Pickler(unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys)
return backend.encode(context.flatten(value, reset=reset))
def _make_backend(backend):
if backend is None:
return JSONBackend()
else:
return backend
class Pickler(object):
def __init__(self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = _make_backend(backend)
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
# The current recursion depth
self._depth = -1
# The maximal recursion depth
self._max_depth = max_depth
# Maps id(obj) to reference IDs
self._objs = {}
# Avoids garbage collection
self._seen = []
# maximum amount of items to take from a pickled iterator
self._max_iter = max_iter
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
"""Steps down one level in the namespace.
"""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
"""
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
"""
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
"""
Log a reference to an in-memory object, and return
if that object should be considered newly logged.
"""
is_new = self._log_ref(obj)
# Pretend the object is new
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
"""
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
self._push()
return self._pop(self._flatten_obj(obj))
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._depth == self._max_depth
if max_reached or (not self.make_refs and id(obj) in self._objs):
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
if PY2 and isinstance(obj, file):
return self._flatten_file
if util.is_primitive(obj):
return lambda obj: obj
if util.is_bytes(obj):
return self._flatten_bytestring
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new
"""
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
def _flatten_file(self, obj):
"""
Special case file objects
"""
assert not PY3 and isinstance(obj, file)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except:
pass
return {tags.B64: base64.encodestring(obj).decode('utf-8')}
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict
"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, (str, unicode)):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (state and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
data[tags.REDUCE] = list(map(self._flatten, rv_as_list))
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
class_name = util.importable_name(cls)
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(
map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '%s/%s' % (obj.__name__,
obj.__name__)
else:
data = unicode(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
# Support objects that subclasses list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
"""Recursively call flatten() and return json-friendly dict
"""
if data is None:
data = obj.__class__()
flatten = self._flatten_key_value_pair
for k, v in sorted(obj.items(), key=util.itemgetter):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
# Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
value = self._flatten(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
"""Return a json-friendly dict for new-style objects with __slots__.
"""
allslots = [_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [x for x in dir(obj)
if not x.startswith('__') and not x.endswith('__')]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
"""Flatten a key/value pair into the passed-in dictionary."""
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, (str, unicode)) or k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, (str, unicode)):
try:
k = repr(k)
except:
k = unicode(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
"""Return a json-friendly dict for a sequence subclass."""
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(k,
reset=False, keys=True,
context=self, backend=self.backend,
make_refs=self.make_refs)
def _getstate(self, obj, data):
state = self._flatten_obj(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _mktyperef(obj):
"""Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': '__builtin__.AssertionError'}
True
"""
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
"""Converts __slots__ = 'a' into __slots__ = ('a',)
"""
if isinstance(string, (str, unicode)):
return (string,)
return string
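# A minimal usage sketch of the Pickler above; the Point class and its values
# are hypothetical. flatten() returns JSON-safe data, tagging the class under
# tags.OBJECT ('py/object') when unpicklable=True.
if __name__ == '__main__':
    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y
    demo_pickler = Pickler(unpicklable=True)
    print(demo_pickler.flatten(Point(1, 2)))
    # -> {'py/object': '__main__.Point', 'x': 1, 'y': 2}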
| 32.961957
| 87
| 0.547843
|
from __future__ import absolute_import, division, unicode_literals
import base64
import warnings
import sys
from itertools import chain, islice
from . import util
from . import tags
from . import handlers
from .backend import JSONBackend
from .compat import numeric_types, unicode, PY3, PY2
def encode(value,
unpicklable=False,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
numeric_keys=False):
backend = _make_backend(backend)
if context is None:
context = Pickler(unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys)
return backend.encode(context.flatten(value, reset=reset))
def _make_backend(backend):
if backend is None:
return JSONBackend()
else:
return backend
class Pickler(object):
def __init__(self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = _make_backend(backend)
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self._depth = -1
self._max_depth = max_depth
self._objs = {}
self._seen = []
self._max_iter = max_iter
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
self._depth += 1
def _pop(self, value):
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
is_new = self._log_ref(obj)
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
self._push()
return self._pop(self._flatten_obj(obj))
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._depth == self._max_depth
if max_reached or (not self.make_refs and id(obj) in self._objs):
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
if PY2 and isinstance(obj, file):
return self._flatten_file
if util.is_primitive(obj):
return lambda obj: obj
if util.is_bytes(obj):
return self._flatten_bytestring
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
if self._mkref(obj):
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
return self._getref(obj)
def _flatten_file(self, obj):
assert not PY3 and isinstance(obj, file)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except:
pass
return {tags.B64: base64.encodestring(obj).decode('utf-8')}
def _flatten_obj_instance(self, obj):
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
has_getstate = hasattr(obj, '__getstate__')
if has_class:
cls = obj.__class__
else:
cls = type(obj)
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
pass
elif has_reduce_ex:
try:
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, (str, unicode)):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
if not (state and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)):
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
data[tags.REDUCE] = list(map(self._flatten, rv_as_list))
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
class_name = util.importable_name(cls)
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(
map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '%s/%s' % (obj.__name__,
obj.__name__)
else:
data = unicode(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
if data is None:
data = obj.__class__()
flatten = self._flatten_key_value_pair
for k, v in sorted(obj.items(), key=util.itemgetter):
flatten(k, v, data)
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
value = _mktyperef(factory)
else:
if self._mkref(factory):
# Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
value = self._flatten(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
value = self._getref(factory)
data['default_factory'] = value
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
allslots = [_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [x for x in dir(obj)
if not x.startswith('__') and not x.endswith('__')]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, (str, unicode)) or k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null'
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, (str, unicode)):
try:
k = repr(k)
except:
k = unicode(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(k,
reset=False, keys=True,
context=self, backend=self.backend,
make_refs=self.make_refs)
def _getstate(self, obj, data):
state = self._flatten_obj(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _mktyperef(obj):
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
if isinstance(string, (str, unicode)):
return (string,)
return string
| true
| true
|
1c414df555fb5193e1798c9639acfa8994ea9cdb
| 42
|
py
|
Python
|
test/__init__.py
|
arthurtofani/footprint
|
572401d4cba3299ae9915fca2a7d08ea1a3a9bc4
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
arthurtofani/footprint
|
572401d4cba3299ae9915fca2a7d08ea1a3a9bc4
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
arthurtofani/footprint
|
572401d4cba3299ae9915fca2a7d08ea1a3a9bc4
|
[
"MIT"
] | null | null | null |
from . import audio
from . import project
| 14
| 21
| 0.761905
|
from . import audio
from . import project
| true
| true
|
1c414e07399e5fbe74a7cf5f9b12c516dc90deb9
| 814
|
py
|
Python
|
xlsxwriter/test/comparison/test_textbox33.py
|
timgates42/XlsxWriter
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_textbox33.py
|
timgates42/XlsxWriter
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_textbox33.py
|
timgates42/XlsxWriter
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('textbox33.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox('E9', 'This is some text', {'text_rotation': 270})
workbook.close()
self.assertExcelEqual()
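# A standalone sketch of the same call outside the comparison harness; the
# output filename is arbitrary. Here 270 rotates the textbox text; see the
# XlsxWriter textbox options for the full set of accepted rotation values.
if __name__ == '__main__':
    workbook = Workbook('textbox_demo.xlsx')
    worksheet = workbook.add_worksheet()
    worksheet.insert_textbox('E9', 'This is some text', {'text_rotation': 270})
    workbook.close()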
| 23.941176
| 83
| 0.615479
| true
| true
|
|
1c414ec054ba0da54f927fb23732049665126be2
| 799
|
py
|
Python
|
octopus/task/utils.py
|
quaintm/octopus
|
95a732207ee5f43cd0065d8ea6c643cbf3df2d61
|
[
"BSD-3-Clause"
] | null | null | null |
octopus/task/utils.py
|
quaintm/octopus
|
95a732207ee5f43cd0065d8ea6c643cbf3df2d61
|
[
"BSD-3-Clause"
] | null | null | null |
octopus/task/utils.py
|
quaintm/octopus
|
95a732207ee5f43cd0065d8ea6c643cbf3df2d61
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import flash
from flask.ext.login import current_user
from octopus.models import User, Task
def create_query(args, q):
    # Narrow a task query by the requested user and relationship type,
    # returning a (valid, query, title) triple for the view to render.
    valid = True
    user = None
    title = "Query Results"
    user_id = args.get('user_id')
    type = args.get('type')
    if user_id:
        if user_id == "me":
            user = User.get_by_id(current_user.id)
        else:
            try:
                user_id = int(user_id)
                user = User.get_by_id(user_id)
            except ValueError:
                flash('Invalid User Id Entered')
                valid = False
    # Only filter when a user was resolved; otherwise keep the generic
    # title so the caller never hits an unbound name.
    if valid and user is not None:
        if type == 'assigned':
            q = q.filter(Task.assignees.any(id=user.id))
            title = "My Assigned Tasks"
        elif type == 'created':
            q = q.filter(Task.creator_id == user.id)
            title = "My Created Tasks"
    return valid, q, title
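# A minimal usage sketch; the request args and base query are hypothetical,
# and current_user comes from the Flask-Login session:
#   valid, q, title = create_query({'user_id': 'me', 'type': 'assigned'},
#                                  Task.query)
#   if valid:
#       tasks = q.all()  # tasks assigned to the current user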
| 21.594595
| 52
| 0.598248
|
from flask import flash
from flask.ext.login import current_user
from octopus.models import User, Task
def create_query(args, q):
    valid = True
    user = None
    title = "Query Results"
    user_id = args.get('user_id')
    type = args.get('type')
    if user_id:
        if user_id == "me":
            user = User.get_by_id(current_user.id)
        else:
            try:
                user_id = int(user_id)
                user = User.get_by_id(user_id)
            except ValueError:
                flash('Invalid User Id Entered')
                valid = False
    if valid and user is not None:
        if type == 'assigned':
            q = q.filter(Task.assignees.any(id=user.id))
            title = "My Assigned Tasks"
        elif type == 'created':
            q = q.filter(Task.creator_id == user.id)
            title = "My Created Tasks"
    return valid, q, title
| true
| true
|
1c414f10a2e3083a64cdc99b0ec3e514d4ff6780
| 2,133
|
py
|
Python
|
teach.py
|
JMagee03/BabysFirstWords
|
31245b499c34cf4deaab7011c74a8aac265426a2
|
[
"Apache-2.0"
] | null | null | null |
teach.py
|
JMagee03/BabysFirstWords
|
31245b499c34cf4deaab7011c74a8aac265426a2
|
[
"Apache-2.0"
] | null | null | null |
teach.py
|
JMagee03/BabysFirstWords
|
31245b499c34cf4deaab7011c74a8aac265426a2
|
[
"Apache-2.0"
] | null | null | null |
from teachFiles import files, greetings, operations, quizes, breakQues, correct, readSentence, playVideo
def teach():
learning_data = files()
greetings(0)
end_game = 1
while end_game != 0:
res = operations()
print(res)
section = 1
# name = section(res)
if res[0] == 3:
while section != 2 and end_game != 0:
questions = quizes()[1][res[1]]
result = []
print(questions)
print(
"***********************************************************\n***********************************************************\n")
for ques in questions[0]:
ans = input(ques)
result.append(ans)
print(
"\n***********************************************************\n***********************************************************")
print(
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nRESULT\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
correct(questions[1], result)
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
break_k = breakQues()
end_game = break_k[1]
section = break_k[0]
elif res[0] == 4:
playVideo(res[1])
else:
while section != 2 and end_game != 0:
print(
"***********************************************************\n***********************************************************\n")
for text in learning_data[res[0]][res[1]]:
print(text + "\n")
readSentence(text)
print(
"\n***********************************************************\n***********************************************************")
break_k = breakQues()
end_game = break_k[1]
section = break_k[0]
teach()
| 41.823529
| 150
| 0.35443
|
from teachFiles import files, greetings, operations, quizes, breakQues, correct, readSentence, playVideo
def teach():
learning_data = files()
greetings(0)
end_game = 1
while end_game != 0:
res = operations()
print(res)
section = 1
if res[0] == 3:
while section != 2 and end_game != 0:
questions = quizes()[1][res[1]]
result = []
print(questions)
print(
"***********************************************************\n***********************************************************\n")
for ques in questions[0]:
ans = input(ques)
result.append(ans)
print(
"\n***********************************************************\n***********************************************************")
print(
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nRESULT\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
correct(questions[1], result)
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
break_k = breakQues()
end_game = break_k[1]
section = break_k[0]
elif res[0] == 4:
playVideo(res[1])
else:
while section != 2 and end_game != 0:
print(
"***********************************************************\n***********************************************************\n")
for text in learning_data[res[0]][res[1]]:
print(text + "\n")
readSentence(text)
print(
"\n***********************************************************\n***********************************************************")
break_k = breakQues()
end_game = break_k[1]
section = break_k[0]
teach()
| true
| true
|
1c414f959bd6404040555021e10416c878fce4db
| 7,733
|
py
|
Python
|
py/vtproto/vttest_pb2.py
|
paralin/vitess
|
7b048c5442679ce6cf48773cf17a184c1ce91295
|
[
"Apache-2.0"
] | 8
|
2017-08-14T15:19:04.000Z
|
2021-06-07T10:36:52.000Z
|
py/vtproto/vttest_pb2.py
|
paralin/vitess
|
7b048c5442679ce6cf48773cf17a184c1ce91295
|
[
"Apache-2.0"
] | 19
|
2020-09-25T15:41:41.000Z
|
2022-03-25T23:06:54.000Z
|
py/vtproto/vttest_pb2.py
|
paralin/vitess
|
7b048c5442679ce6cf48773cf17a184c1ce91295
|
[
"Apache-2.0"
] | 7
|
2021-03-07T03:24:39.000Z
|
2022-02-16T06:46:10.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vttest.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='vttest.proto',
package='vttest',
syntax='proto3',
serialized_options=_b('Z#vitess.io/vitess/go/vt/proto/vttest'),
serialized_pb=_b('\n\x0cvttest.proto\x12\x06vttest\"/\n\x05Shard\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x64\x62_name_override\x18\x02 \x01(\t\"\xb5\x01\n\x08Keyspace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1d\n\x06shards\x18\x02 \x03(\x0b\x32\r.vttest.Shard\x12\x1c\n\x14sharding_column_name\x18\x03 \x01(\t\x12\x1c\n\x14sharding_column_type\x18\x04 \x01(\t\x12\x13\n\x0bserved_from\x18\x05 \x01(\t\x12\x15\n\rreplica_count\x18\x06 \x01(\x05\x12\x14\n\x0crdonly_count\x18\x07 \x01(\x05\"D\n\x0eVTTestTopology\x12#\n\tkeyspaces\x18\x01 \x03(\x0b\x32\x10.vttest.Keyspace\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\tB%Z#vitess.io/vitess/go/vt/proto/vttestb\x06proto3')
)
_SHARD = _descriptor.Descriptor(
name='Shard',
full_name='vttest.Shard',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='vttest.Shard.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='db_name_override', full_name='vttest.Shard.db_name_override', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=24,
serialized_end=71,
)
_KEYSPACE = _descriptor.Descriptor(
name='Keyspace',
full_name='vttest.Keyspace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='vttest.Keyspace.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shards', full_name='vttest.Keyspace.shards', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sharding_column_name', full_name='vttest.Keyspace.sharding_column_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sharding_column_type', full_name='vttest.Keyspace.sharding_column_type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='served_from', full_name='vttest.Keyspace.served_from', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='replica_count', full_name='vttest.Keyspace.replica_count', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rdonly_count', full_name='vttest.Keyspace.rdonly_count', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=255,
)
_VTTESTTOPOLOGY = _descriptor.Descriptor(
name='VTTestTopology',
full_name='vttest.VTTestTopology',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='keyspaces', full_name='vttest.VTTestTopology.keyspaces', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cells', full_name='vttest.VTTestTopology.cells', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=257,
serialized_end=325,
)
_KEYSPACE.fields_by_name['shards'].message_type = _SHARD
_VTTESTTOPOLOGY.fields_by_name['keyspaces'].message_type = _KEYSPACE
DESCRIPTOR.message_types_by_name['Shard'] = _SHARD
DESCRIPTOR.message_types_by_name['Keyspace'] = _KEYSPACE
DESCRIPTOR.message_types_by_name['VTTestTopology'] = _VTTESTTOPOLOGY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Shard = _reflection.GeneratedProtocolMessageType('Shard', (_message.Message,), dict(
DESCRIPTOR = _SHARD,
__module__ = 'vttest_pb2'
# @@protoc_insertion_point(class_scope:vttest.Shard)
))
_sym_db.RegisterMessage(Shard)
Keyspace = _reflection.GeneratedProtocolMessageType('Keyspace', (_message.Message,), dict(
DESCRIPTOR = _KEYSPACE,
__module__ = 'vttest_pb2'
# @@protoc_insertion_point(class_scope:vttest.Keyspace)
))
_sym_db.RegisterMessage(Keyspace)
VTTestTopology = _reflection.GeneratedProtocolMessageType('VTTestTopology', (_message.Message,), dict(
DESCRIPTOR = _VTTESTTOPOLOGY,
__module__ = 'vttest_pb2'
# @@protoc_insertion_point(class_scope:vttest.VTTestTopology)
))
_sym_db.RegisterMessage(VTTestTopology)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
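# A brief usage sketch of the generated message classes; the cell, keyspace
# and shard values are illustrative, and this demo is not part of the protoc
# output above.
if __name__ == '__main__':
    topo = VTTestTopology()
    topo.cells.append('zone1')
    ks = topo.keyspaces.add()
    ks.name = 'test_keyspace'
    ks.shards.add(name='-80')
    ks.shards.add(name='80-')
    print(topo)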
| 37.357488
| 669
| 0.73723
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='vttest.proto',
package='vttest',
syntax='proto3',
serialized_options=_b('Z#vitess.io/vitess/go/vt/proto/vttest'),
serialized_pb=_b('\n\x0cvttest.proto\x12\x06vttest\"/\n\x05Shard\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x64\x62_name_override\x18\x02 \x01(\t\"\xb5\x01\n\x08Keyspace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1d\n\x06shards\x18\x02 \x03(\x0b\x32\r.vttest.Shard\x12\x1c\n\x14sharding_column_name\x18\x03 \x01(\t\x12\x1c\n\x14sharding_column_type\x18\x04 \x01(\t\x12\x13\n\x0bserved_from\x18\x05 \x01(\t\x12\x15\n\rreplica_count\x18\x06 \x01(\x05\x12\x14\n\x0crdonly_count\x18\x07 \x01(\x05\"D\n\x0eVTTestTopology\x12#\n\tkeyspaces\x18\x01 \x03(\x0b\x32\x10.vttest.Keyspace\x12\r\n\x05\x63\x65lls\x18\x02 \x03(\tB%Z#vitess.io/vitess/go/vt/proto/vttestb\x06proto3')
)
_SHARD = _descriptor.Descriptor(
name='Shard',
full_name='vttest.Shard',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='vttest.Shard.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='db_name_override', full_name='vttest.Shard.db_name_override', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=24,
serialized_end=71,
)
_KEYSPACE = _descriptor.Descriptor(
name='Keyspace',
full_name='vttest.Keyspace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='vttest.Keyspace.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shards', full_name='vttest.Keyspace.shards', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sharding_column_name', full_name='vttest.Keyspace.sharding_column_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sharding_column_type', full_name='vttest.Keyspace.sharding_column_type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='served_from', full_name='vttest.Keyspace.served_from', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='replica_count', full_name='vttest.Keyspace.replica_count', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rdonly_count', full_name='vttest.Keyspace.rdonly_count', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=255,
)
_VTTESTTOPOLOGY = _descriptor.Descriptor(
name='VTTestTopology',
full_name='vttest.VTTestTopology',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='keyspaces', full_name='vttest.VTTestTopology.keyspaces', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cells', full_name='vttest.VTTestTopology.cells', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=257,
serialized_end=325,
)
_KEYSPACE.fields_by_name['shards'].message_type = _SHARD
_VTTESTTOPOLOGY.fields_by_name['keyspaces'].message_type = _KEYSPACE
DESCRIPTOR.message_types_by_name['Shard'] = _SHARD
DESCRIPTOR.message_types_by_name['Keyspace'] = _KEYSPACE
DESCRIPTOR.message_types_by_name['VTTestTopology'] = _VTTESTTOPOLOGY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Shard = _reflection.GeneratedProtocolMessageType('Shard', (_message.Message,), dict(
DESCRIPTOR = _SHARD,
__module__ = 'vttest_pb2'
# @@protoc_insertion_point(class_scope:vttest.Shard)
))
_sym_db.RegisterMessage(Shard)
Keyspace = _reflection.GeneratedProtocolMessageType('Keyspace', (_message.Message,), dict(
DESCRIPTOR = _KEYSPACE,
__module__ = 'vttest_pb2'
# @@protoc_insertion_point(class_scope:vttest.Keyspace)
))
_sym_db.RegisterMessage(Keyspace)
VTTestTopology = _reflection.GeneratedProtocolMessageType('VTTestTopology', (_message.Message,), dict(
DESCRIPTOR = _VTTESTTOPOLOGY,
__module__ = 'vttest_pb2'
# @@protoc_insertion_point(class_scope:vttest.VTTestTopology)
))
_sym_db.RegisterMessage(VTTestTopology)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
1c41506e1f1ff6771be6cdc6dfbec700f86ff83a
| 14,332
|
py
|
Python
|
scripts/devops_tasks/common_tasks.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | null | null | null |
scripts/devops_tasks/common_tasks.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | null | null | null |
scripts/devops_tasks/common_tasks.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Below are common methods for the devops build steps. This is the common location that will be updated with
# package targeting during release.
import glob
from subprocess import check_call, CalledProcessError
import os
import errno
import shutil
import sys
import logging
import ast
import textwrap
import io
import re
import pdb
# Assumes the presence of setuptools
from pkg_resources import parse_version, parse_requirements, Requirement, WorkingSet, working_set
# this assumes the presence of "packaging"
from packaging.specifiers import SpecifierSet
from packaging.version import Version
DEV_REQ_FILE = "dev_requirements.txt"
NEW_DEV_REQ_FILE = "new_dev_requirements.txt"
logging.getLogger().setLevel(logging.INFO)
OMITTED_CI_PACKAGES = [
"azure-mgmt-documentdb",
"azure-servicemanagement-legacy",
"azure-mgmt-scheduler",
"azure",
"azure-mgmt",
"azure-storage",
]
MANAGEMENT_PACKAGE_IDENTIFIERS = [
"mgmt",
"azure-cognitiveservices",
"azure-servicefabric",
"nspkg",
"azure-keyvault",
"azure-synapse"
]
META_PACKAGES = ["azure", "azure-mgmt", "azure-keyvault"]
REGRESSION_EXCLUDED_PACKAGES = [
"azure-common",
]
MANAGEMENT_PACKAGES_FILTER_EXCLUSIONS = [
"azure-mgmt-core",
]
omit_regression = (
lambda x: "nspkg" not in x
and "mgmt" not in x
and os.path.basename(x) not in MANAGEMENT_PACKAGE_IDENTIFIERS
and os.path.basename(x) not in META_PACKAGES
and os.path.basename(x) not in REGRESSION_EXCLUDED_PACKAGES
)
omit_docs = lambda x: "nspkg" not in x and os.path.basename(x) not in META_PACKAGES
omit_build = lambda x: x # Dummy lambda to match omit type
lambda_filter_azure_pkg = lambda x: x.startswith("azure") and "-nspkg" not in x
omit_mgmt = lambda x: "mgmt" not in x or os.path.basename(x) in MANAGEMENT_PACKAGES_FILTER_EXCLUSIONS
# dict of filter type and filter function
omit_funct_dict = {
"Build": omit_build,
"Docs": omit_docs,
"Regression": omit_regression,
"Omit_management": omit_mgmt,
}
def log_file(file_location, is_error=False):
with open(file_location, "r") as file:
for line in file:
sys.stdout.write(line)
sys.stdout.write("\n")
sys.stdout.flush()
def read_file(file_location):
str_buffer = ""
with open(file_location, "r") as file:
for line in file:
str_buffer += line
return str_buffer
def cleanup_folder(target_folder):
for file in os.listdir(target_folder):
file_path = os.path.join(target_folder, file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
logging.error(e)
# helper functions
def clean_coverage(coverage_dir):
try:
os.mkdir(coverage_dir)
except OSError as e:
if e.errno == errno.EEXIST:
logging.info("Coverage dir already exists. Cleaning.")
cleanup_folder(coverage_dir)
else:
raise
def parse_setup(setup_path):
setup_filename = os.path.join(setup_path, "setup.py")
mock_setup = textwrap.dedent(
"""\
def setup(*args, **kwargs):
__setup_calls__.append((args, kwargs))
"""
)
parsed_mock_setup = ast.parse(mock_setup, filename=setup_filename)
with io.open(setup_filename, "r", encoding="utf-8-sig") as setup_file:
parsed = ast.parse(setup_file.read())
for index, node in enumerate(parsed.body[:]):
if (
not isinstance(node, ast.Expr)
or not isinstance(node.value, ast.Call)
or not hasattr(node.value.func, "id")
or node.value.func.id != "setup"
):
continue
parsed.body[index:index] = parsed_mock_setup.body
break
fixed = ast.fix_missing_locations(parsed)
codeobj = compile(fixed, setup_filename, "exec")
local_vars = {}
global_vars = {"__setup_calls__": []}
current_dir = os.getcwd()
working_dir = os.path.dirname(setup_filename)
os.chdir(working_dir)
exec(codeobj, global_vars, local_vars)
os.chdir(current_dir)
_, kwargs = global_vars["__setup_calls__"][0]
try:
python_requires = kwargs["python_requires"]
# most do not define this, fall back to what we define as universal
except KeyError as e:
python_requires = ">=2.7"
version = kwargs["version"]
name = kwargs["name"]
requires = []
if "install_requires" in kwargs:
requires = kwargs["install_requires"]
return name, version, python_requires, requires
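# The stub spliced in above records the setup(...) call's kwargs instead of
# invoking setuptools, so parse_setup() works on any setup.py without side
# effects. A hypothetical invocation (path and return values illustrative):
#   name, version, python_requires, requires = parse_setup("sdk/core/azure-core")
#   # -> ("azure-core", "1.4.0", ">=2.7", ["requests>=2.18.4", ...])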
def parse_requirements_file(file_location):
with open(file_location, "r") as f:
reqs = f.read()
return dict((req.name, req) for req in parse_requirements(reqs))
def parse_setup_requires(setup_path):
_, _, python_requires, _ = parse_setup(setup_path)
return python_requires
def filter_for_compatibility(package_set):
collected_packages = []
v = sys.version_info
running_major_version = Version(".".join([str(v[0]), str(v[1]), str(v[2])]))
for pkg in package_set:
spec_set = SpecifierSet(parse_setup_requires(pkg))
if running_major_version in spec_set:
collected_packages.append(pkg)
return collected_packages
# this function is where a glob string gets translated to a list of packages
# It is called by both BUILD (package) and TEST. In the future, this function will be the central location
# for handling targeting of release packages
def process_glob_string(
glob_string,
target_root_dir,
additional_contains_filter="",
filter_type="Build",
):
if glob_string:
individual_globs = glob_string.split(",")
else:
individual_globs = "azure-*"
collected_top_level_directories = []
for glob_string in individual_globs:
globbed = glob.glob(
os.path.join(target_root_dir, glob_string, "setup.py")
) + glob.glob(os.path.join(target_root_dir, "sdk/*/", glob_string, "setup.py"))
collected_top_level_directories.extend([os.path.dirname(p) for p in globbed])
# dedup, in case we have double coverage from the glob strings. Example: "azure-mgmt-keyvault,azure-mgmt-*"
collected_directories = list(
set(
[
p
for p in collected_top_level_directories
if additional_contains_filter in p
]
)
)
# if we have individually queued this specific package, it's obvious that we want to build it specifically
# in this case, do not honor the omission list
if len(collected_directories) == 1:
pkg_set_ci_filtered = filter_for_compatibility(collected_directories)
# however, if there are multiple packages being built, we should honor the omission list and NOT build the omitted
# packages
else:
allowed_package_set = remove_omitted_packages(collected_directories)
pkg_set_ci_filtered = filter_for_compatibility(allowed_package_set)
    # Apply the filter matching the given filter type, e.g. Docs, Regression, Omit_management
pkg_set_ci_filtered = list(filter(omit_funct_dict.get(filter_type, omit_build), pkg_set_ci_filtered))
logging.info(
"Target packages after filtering by CI: {}".format(
pkg_set_ci_filtered
)
)
logging.info(
"Package(s) omitted by CI filter: {}".format(
list(set(collected_directories) - set(pkg_set_ci_filtered))
)
)
return sorted(pkg_set_ci_filtered)
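# Illustrative call (paths are hypothetical): process_glob_string(
# "azure-keyvault,azure-mgmt-*", "/repo") expands each comma-separated glob
# against /repo/<glob>/setup.py and /repo/sdk/*/<glob>/setup.py, de-dupes the
# hits, then applies the omission list and the CI compatibility filter.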
def remove_omitted_packages(collected_directories):
packages = [
package_dir
for package_dir in collected_directories
if os.path.basename(package_dir) not in OMITTED_CI_PACKAGES
]
return packages
def run_check_call(
command_array,
working_directory,
acceptable_return_codes=[],
run_as_shell=False,
always_exit=True,
):
try:
if run_as_shell:
logging.info(
"Command Array: {0}, Target Working Directory: {1}".format(
" ".join(command_array), working_directory
)
)
check_call(" ".join(command_array), cwd=working_directory, shell=True)
else:
logging.info(
"Command Array: {0}, Target Working Directory: {1}".format(
command_array, working_directory
)
)
check_call(command_array, cwd=working_directory)
except CalledProcessError as err:
if err.returncode not in acceptable_return_codes:
            logging.error(err)
if always_exit:
exit(1)
else:
return err
# This function generates code coverage parameters
def create_code_coverage_params(parsed_args, package_name):
coverage_args = []
if parsed_args.disablecov:
logging.info("Code coverage disabled as per the flag(--disablecov)")
coverage_args.append("--no-cov")
else:
current_package_name = package_name.replace("-", ".")
coverage_args.append("--cov={}".format(current_package_name))
logging.info(
"Code coverage is enabled for package {0}, pytest arguements: {1}".format(
current_package_name, coverage_args
)
)
return coverage_args
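# Illustrative outcome: for package "azure-core" with coverage enabled the
# returned pytest args are ["--cov=azure.core"]; with --disablecov set they
# are ["--no-cov"]. (The shape of the argparse namespace is an assumption.)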
# This function returns whether exit code 5 (pytest: no tests collected)
# is allowed for a given package
def is_error_code_5_allowed(target_pkg, pkg_name):
    return (
        any(pkg_id in target_pkg for pkg_id in MANAGEMENT_PACKAGE_IDENTIFIERS)
        or pkg_name in MANAGEMENT_PACKAGE_IDENTIFIERS
    )
# This function parses requirement and return package name and specifier
def parse_require(req):
req_object = Requirement.parse(req)
pkg_name = req_object.key
spec = SpecifierSet(str(req_object).replace(pkg_name, ""))
return [pkg_name, spec]
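# Illustrative sketch with a made-up requirement string:
def _demo_parse_require():
    # expected: ["azure-core", <SpecifierSet(">=1.2.0")>]
    return parse_require("azure-core>=1.2.0")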
def find_whl(package_name, version, whl_directory):
if not os.path.exists(whl_directory):
logging.error("Whl directory is incorrect")
exit(1)
logging.info("Searching whl for package {}".format(package_name))
whl_name = "{0}-{1}*.whl".format(package_name.replace("-", "_"), version)
paths = glob.glob(os.path.join(whl_directory, whl_name))
if not paths:
logging.error(
"whl is not found in whl directory {0} for package {1}".format(
whl_directory, package_name
)
)
exit(1)
return paths[0]
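# e.g. find_whl("azure-core", "1.21.0", "/tmp/dist") globs for
# "/tmp/dist/azure_core-1.21.0*.whl" and returns the first match
# (directory and version here are hypothetical).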
# This method installs package from a pre-built whl
def install_package_from_whl(
package_whl_path, working_dir, python_sym_link=sys.executable
):
commands = [python_sym_link, "-m", "pip", "install", package_whl_path]
run_check_call(commands, working_dir)
logging.info("Installed package from {}".format(package_whl_path))
def filter_dev_requirements(pkg_root_path, packages_to_exclude, dest_dir):
# This method returns list of requirements from dev_requirements by filtering out packages in given list
dev_req_path = os.path.join(pkg_root_path, DEV_REQ_FILE)
if not os.path.exists(dev_req_path):
logging.info("{0} is not found in package root {1}".format(DEV_REQ_FILE, pkg_root_path))
return ""
requirements = []
with open(dev_req_path, "r") as dev_req_file:
requirements = dev_req_file.readlines()
# filter any package given in excluded list
requirements = [
req
for req in requirements
if os.path.basename(req.replace("\n", "")) not in packages_to_exclude
]
logging.info("Filtered dev requirements: {}".format(requirements))
# create new dev requirements file with different name for filtered requirements
new_dev_req_path = os.path.join(dest_dir, NEW_DEV_REQ_FILE)
with open(new_dev_req_path, "w") as dev_req_file:
dev_req_file.writelines(requirements)
return new_dev_req_path
def is_required_version_on_pypi(package_name, spec):
from pypi_tools.pypi import PyPIClient
client = PyPIClient()
versions = []
try:
versions = [str(v) for v in client.get_ordered_versions(package_name) if str(v) in spec]
    except Exception:
        logging.error("Package {} is not found on PyPI".format(package_name))
return versions
def find_packages_missing_on_pypi(path):
import pkginfo
requires = []
if path.endswith(".whl"):
requires = list(filter(lambda_filter_azure_pkg, pkginfo.get_metadata(path).requires_dist))
else:
_, _, _, requires = parse_setup(path)
# parse pkg name and spec
pkg_spec_dict = dict(parse_require(req) for req in requires)
logging.info("Package requirement: {}".format(pkg_spec_dict))
# find if version is available on pypi
missing_packages = ["{0}{1}".format(pkg, pkg_spec_dict[pkg]) for pkg in pkg_spec_dict.keys() if not is_required_version_on_pypi(pkg, pkg_spec_dict[pkg])]
if missing_packages:
logging.error("Packages not found on PyPI: {}".format(missing_packages))
return missing_packages
def find_tools_packages(root_path):
"""Find packages in tools directory. For e.g. azure-sdk-tools, azure-devtools
"""
glob_string = os.path.join(root_path, "tools", "*", "setup.py")
pkgs = [os.path.basename(os.path.dirname(p)) for p in glob.glob(glob_string)]
logging.info("Packages in tools: {}".format(pkgs))
return pkgs
def get_installed_packages(paths = None):
"""Find packages in default or given lib paths
"""
# WorkingSet returns installed packages in given path
# working_set returns installed packages in default path
# if paths is set then find installed packages from given paths
ws = WorkingSet(paths) if paths else working_set
return ["{0}=={1}".format(p.project_name, p.version) for p in ws]
| 32.947126
| 157
| 0.662155
|
import glob
from subprocess import check_call, CalledProcessError
import os
import errno
import shutil
import sys
import logging
import ast
import textwrap
import io
import re
import pdb
from pkg_resources import parse_version, parse_requirements, Requirement, WorkingSet, working_set
from packaging.specifiers import SpecifierSet
from packaging.version import Version
DEV_REQ_FILE = "dev_requirements.txt"
NEW_DEV_REQ_FILE = "new_dev_requirements.txt"
logging.getLogger().setLevel(logging.INFO)
OMITTED_CI_PACKAGES = [
"azure-mgmt-documentdb",
"azure-servicemanagement-legacy",
"azure-mgmt-scheduler",
"azure",
"azure-mgmt",
"azure-storage",
]
MANAGEMENT_PACKAGE_IDENTIFIERS = [
"mgmt",
"azure-cognitiveservices",
"azure-servicefabric",
"nspkg",
"azure-keyvault",
"azure-synapse"
]
META_PACKAGES = ["azure", "azure-mgmt", "azure-keyvault"]
REGRESSION_EXCLUDED_PACKAGES = [
"azure-common",
]
MANAGEMENT_PACKAGES_FILTER_EXCLUSIONS = [
"azure-mgmt-core",
]
omit_regression = (
lambda x: "nspkg" not in x
and "mgmt" not in x
and os.path.basename(x) not in MANAGEMENT_PACKAGE_IDENTIFIERS
and os.path.basename(x) not in META_PACKAGES
and os.path.basename(x) not in REGRESSION_EXCLUDED_PACKAGES
)
omit_docs = lambda x: "nspkg" not in x and os.path.basename(x) not in META_PACKAGES
omit_build = lambda x: x
lambda_filter_azure_pkg = lambda x: x.startswith("azure") and "-nspkg" not in x
omit_mgmt = lambda x: "mgmt" not in x or os.path.basename(x) in MANAGEMENT_PACKAGES_FILTER_EXCLUSIONS
omit_funct_dict = {
"Build": omit_build,
"Docs": omit_docs,
"Regression": omit_regression,
"Omit_management": omit_mgmt,
}
def log_file(file_location, is_error=False):
with open(file_location, "r") as file:
for line in file:
sys.stdout.write(line)
sys.stdout.write("\n")
sys.stdout.flush()
def read_file(file_location):
str_buffer = ""
with open(file_location, "r") as file:
for line in file:
str_buffer += line
return str_buffer
def cleanup_folder(target_folder):
for file in os.listdir(target_folder):
file_path = os.path.join(target_folder, file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
logging.error(e)
def clean_coverage(coverage_dir):
try:
os.mkdir(coverage_dir)
except OSError as e:
if e.errno == errno.EEXIST:
logging.info("Coverage dir already exists. Cleaning.")
cleanup_folder(coverage_dir)
else:
raise
def parse_setup(setup_path):
setup_filename = os.path.join(setup_path, "setup.py")
mock_setup = textwrap.dedent(
"""\
def setup(*args, **kwargs):
__setup_calls__.append((args, kwargs))
"""
)
parsed_mock_setup = ast.parse(mock_setup, filename=setup_filename)
with io.open(setup_filename, "r", encoding="utf-8-sig") as setup_file:
parsed = ast.parse(setup_file.read())
for index, node in enumerate(parsed.body[:]):
if (
not isinstance(node, ast.Expr)
or not isinstance(node.value, ast.Call)
or not hasattr(node.value.func, "id")
or node.value.func.id != "setup"
):
continue
parsed.body[index:index] = parsed_mock_setup.body
break
fixed = ast.fix_missing_locations(parsed)
codeobj = compile(fixed, setup_filename, "exec")
local_vars = {}
global_vars = {"__setup_calls__": []}
current_dir = os.getcwd()
working_dir = os.path.dirname(setup_filename)
os.chdir(working_dir)
exec(codeobj, global_vars, local_vars)
os.chdir(current_dir)
_, kwargs = global_vars["__setup_calls__"][0]
try:
python_requires = kwargs["python_requires"]
except KeyError as e:
python_requires = ">=2.7"
version = kwargs["version"]
name = kwargs["name"]
requires = []
if "install_requires" in kwargs:
requires = kwargs["install_requires"]
return name, version, python_requires, requires
def parse_requirements_file(file_location):
with open(file_location, "r") as f:
reqs = f.read()
return dict((req.name, req) for req in parse_requirements(reqs))
def parse_setup_requires(setup_path):
_, _, python_requires, _ = parse_setup(setup_path)
return python_requires
def filter_for_compatibility(package_set):
collected_packages = []
v = sys.version_info
running_major_version = Version(".".join([str(v[0]), str(v[1]), str(v[2])]))
for pkg in package_set:
spec_set = SpecifierSet(parse_setup_requires(pkg))
if running_major_version in spec_set:
collected_packages.append(pkg)
return collected_packages
def process_glob_string(
glob_string,
target_root_dir,
additional_contains_filter="",
filter_type="Build",
):
if glob_string:
individual_globs = glob_string.split(",")
else:
        individual_globs = ["azure-*"]
collected_top_level_directories = []
for glob_string in individual_globs:
globbed = glob.glob(
os.path.join(target_root_dir, glob_string, "setup.py")
) + glob.glob(os.path.join(target_root_dir, "sdk/*/", glob_string, "setup.py"))
collected_top_level_directories.extend([os.path.dirname(p) for p in globbed])
collected_directories = list(
set(
[
p
for p in collected_top_level_directories
if additional_contains_filter in p
]
)
)
# in this case, do not honor the omission list
if len(collected_directories) == 1:
pkg_set_ci_filtered = filter_for_compatibility(collected_directories)
# however, if there are multiple packages being built, we should honor the omission list and NOT build the omitted
# packages
else:
allowed_package_set = remove_omitted_packages(collected_directories)
pkg_set_ci_filtered = filter_for_compatibility(allowed_package_set)
# Apply filter based on filter type. for e.g. Docs, Regression, Management
pkg_set_ci_filtered = list(filter(omit_funct_dict.get(filter_type, omit_build), pkg_set_ci_filtered))
logging.info(
"Target packages after filtering by CI: {}".format(
pkg_set_ci_filtered
)
)
logging.info(
"Package(s) omitted by CI filter: {}".format(
list(set(collected_directories) - set(pkg_set_ci_filtered))
)
)
return sorted(pkg_set_ci_filtered)
def remove_omitted_packages(collected_directories):
packages = [
package_dir
for package_dir in collected_directories
if os.path.basename(package_dir) not in OMITTED_CI_PACKAGES
]
return packages
def run_check_call(
command_array,
working_directory,
acceptable_return_codes=[],
run_as_shell=False,
always_exit=True,
):
try:
if run_as_shell:
logging.info(
"Command Array: {0}, Target Working Directory: {1}".format(
" ".join(command_array), working_directory
)
)
check_call(" ".join(command_array), cwd=working_directory, shell=True)
else:
logging.info(
"Command Array: {0}, Target Working Directory: {1}".format(
command_array, working_directory
)
)
check_call(command_array, cwd=working_directory)
except CalledProcessError as err:
if err.returncode not in acceptable_return_codes:
            logging.error(err)
if always_exit:
exit(1)
else:
return err
# This function generates code coverage parameters
def create_code_coverage_params(parsed_args, package_name):
coverage_args = []
if parsed_args.disablecov:
logging.info("Code coverage disabled as per the flag(--disablecov)")
coverage_args.append("--no-cov")
else:
current_package_name = package_name.replace("-", ".")
coverage_args.append("--cov={}".format(current_package_name))
logging.info(
"Code coverage is enabled for package {0}, pytest arguements: {1}".format(
current_package_name, coverage_args
)
)
return coverage_args
# This function returns if error code 5 is allowed for a given package
def is_error_code_5_allowed(target_pkg, pkg_name):
if (
all(
map(
lambda x: any(
[pkg_id in x for pkg_id in MANAGEMENT_PACKAGE_IDENTIFIERS]
),
[target_pkg],
)
)
or pkg_name in MANAGEMENT_PACKAGE_IDENTIFIERS
):
return True
else:
return False
# This function parses requirement and return package name and specifier
def parse_require(req):
req_object = Requirement.parse(req)
pkg_name = req_object.key
spec = SpecifierSet(str(req_object).replace(pkg_name, ""))
return [pkg_name, spec]
def find_whl(package_name, version, whl_directory):
if not os.path.exists(whl_directory):
logging.error("Whl directory is incorrect")
exit(1)
logging.info("Searching whl for package {}".format(package_name))
whl_name = "{0}-{1}*.whl".format(package_name.replace("-", "_"), version)
paths = glob.glob(os.path.join(whl_directory, whl_name))
if not paths:
logging.error(
"whl is not found in whl directory {0} for package {1}".format(
whl_directory, package_name
)
)
exit(1)
return paths[0]
# This method installs package from a pre-built whl
def install_package_from_whl(
package_whl_path, working_dir, python_sym_link=sys.executable
):
commands = [python_sym_link, "-m", "pip", "install", package_whl_path]
run_check_call(commands, working_dir)
logging.info("Installed package from {}".format(package_whl_path))
def filter_dev_requirements(pkg_root_path, packages_to_exclude, dest_dir):
# This method returns list of requirements from dev_requirements by filtering out packages in given list
dev_req_path = os.path.join(pkg_root_path, DEV_REQ_FILE)
if not os.path.exists(dev_req_path):
logging.info("{0} is not found in package root {1}".format(DEV_REQ_FILE, pkg_root_path))
return ""
requirements = []
with open(dev_req_path, "r") as dev_req_file:
requirements = dev_req_file.readlines()
# filter any package given in excluded list
requirements = [
req
for req in requirements
if os.path.basename(req.replace("\n", "")) not in packages_to_exclude
]
logging.info("Filtered dev requirements: {}".format(requirements))
# create new dev requirements file with different name for filtered requirements
new_dev_req_path = os.path.join(dest_dir, NEW_DEV_REQ_FILE)
with open(new_dev_req_path, "w") as dev_req_file:
dev_req_file.writelines(requirements)
return new_dev_req_path
def is_required_version_on_pypi(package_name, spec):
from pypi_tools.pypi import PyPIClient
client = PyPIClient()
versions = []
try:
versions = [str(v) for v in client.get_ordered_versions(package_name) if str(v) in spec]
    except Exception:
        logging.error("Package {} is not found on PyPI".format(package_name))
return versions
def find_packages_missing_on_pypi(path):
import pkginfo
requires = []
if path.endswith(".whl"):
requires = list(filter(lambda_filter_azure_pkg, pkginfo.get_metadata(path).requires_dist))
else:
_, _, _, requires = parse_setup(path)
# parse pkg name and spec
pkg_spec_dict = dict(parse_require(req) for req in requires)
logging.info("Package requirement: {}".format(pkg_spec_dict))
# find if version is available on pypi
missing_packages = ["{0}{1}".format(pkg, pkg_spec_dict[pkg]) for pkg in pkg_spec_dict.keys() if not is_required_version_on_pypi(pkg, pkg_spec_dict[pkg])]
if missing_packages:
logging.error("Packages not found on PyPI: {}".format(missing_packages))
return missing_packages
def find_tools_packages(root_path):
glob_string = os.path.join(root_path, "tools", "*", "setup.py")
pkgs = [os.path.basename(os.path.dirname(p)) for p in glob.glob(glob_string)]
logging.info("Packages in tools: {}".format(pkgs))
return pkgs
def get_installed_packages(paths = None):
# WorkingSet returns installed packages in given path
# working_set returns installed packages in default path
# if paths is set then find installed packages from given paths
ws = WorkingSet(paths) if paths else working_set
return ["{0}=={1}".format(p.project_name, p.version) for p in ws]
| true
| true
|
1c415330ea5efeebcb714170df4355ed6072565d
| 1,448
|
py
|
Python
|
tools/pubsub2inbox/test/helpers.py
|
marcosgm/professional-services
|
f332b425c2f3b6538ebf65afda7e67de3bed1b3d
|
[
"Apache-2.0"
] | 2,116
|
2017-05-18T19:33:05.000Z
|
2022-03-31T13:34:48.000Z
|
tools/pubsub2inbox/test/helpers.py
|
hyuatpc/professional-services
|
e5c811a8752e91fdf9f959a0414931010b0ea1ba
|
[
"Apache-2.0"
] | 548
|
2017-05-20T05:05:35.000Z
|
2022-03-28T16:38:12.000Z
|
tools/pubsub2inbox/test/helpers.py
|
hyuatpc/professional-services
|
e5c811a8752e91fdf9f959a0414931010b0ea1ba
|
[
"Apache-2.0"
] | 1,095
|
2017-05-19T00:02:36.000Z
|
2022-03-31T05:21:39.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import yaml
from google.cloud.functions.context import Context
def load_config(config_name, params=None):
with open('test/configs/%s.yaml' % config_name) as config_file:
configuration = config_file.read()
if params:
for k, v in params.items():
configuration = configuration.replace(k, v)
cfg = yaml.load(configuration, Loader=yaml.SafeLoader)
return cfg
def fixture_to_pubsub(fixture):
with open('test/fixtures/%s.json' % fixture, 'r') as file:
data = json.loads(file.read())
event = {
'data': data[0]['message']['data'],
'attributes': data[0]['message']['attributes']
}
context = Context(eventId=data[0]['message']['messageId'],
timestamp=data[0]['message']['publishTime'])
return event, context
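# Illustrative fixture shape (hypothetical values): test/fixtures/<name>.json
# is expected to be a list holding one Pub/Sub push envelope, e.g.
# [{"message": {"data": "...", "attributes": {...},
#               "messageId": "1", "publishTime": "2021-01-01T00:00:00Z"}}]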
| 34.47619
| 76
| 0.665055
|
import json
import yaml
from google.cloud.functions.context import Context
def load_config(config_name, params=None):
with open('test/configs/%s.yaml' % config_name) as config_file:
configuration = config_file.read()
if params:
for k, v in params.items():
configuration = configuration.replace(k, v)
cfg = yaml.load(configuration, Loader=yaml.SafeLoader)
return cfg
def fixture_to_pubsub(fixture):
with open('test/fixtures/%s.json' % fixture, 'r') as file:
data = json.loads(file.read())
event = {
'data': data[0]['message']['data'],
'attributes': data[0]['message']['attributes']
}
context = Context(eventId=data[0]['message']['messageId'],
timestamp=data[0]['message']['publishTime'])
return event, context
| true
| true
|
1c4154cfa8ff7d4d1877f98c4f6d29d63c17d20c
| 13,107
|
py
|
Python
|
safe_eval/eval.py
|
bentheiii/safe_eval
|
caf9e7a6df3d6029e4bdac2abe11326d55c09ed2
|
[
"MIT"
] | 1
|
2021-05-16T17:24:05.000Z
|
2021-05-16T17:24:05.000Z
|
safe_eval/eval.py
|
bentheiii/safe_eval
|
caf9e7a6df3d6029e4bdac2abe11326d55c09ed2
|
[
"MIT"
] | null | null | null |
safe_eval/eval.py
|
bentheiii/safe_eval
|
caf9e7a6df3d6029e4bdac2abe11326d55c09ed2
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import operator as op
from _ast import Name, Call, Starred, Constant, BinOp, Compare, UnaryOp, BoolOp, Attribute, FormattedValue, \
JoinedStr, IfExp, Lambda, Subscript, \
ListComp, SetComp, GeneratorExp, \
DictComp, \
expr, \
List, Tuple, Set, \
Add, Sub, Mult, MatMult, Div, Mod, Pow, LShift, RShift, BitOr, BitXor, BitAnd, FloorDiv, \
Invert, Not, UAdd, USub, \
And, \
Eq, NotEq, Lt, LtE, Gt, GtE, Is, IsNot, In, NotIn, \
Dict
from ast import parse
from collections import ChainMap, deque
from typing import Any, Dict as tDict, Mapping, Deque, TypeVar, Union, Iterable
from safe_eval.attempt_model import Rule, CallAttempt, GetattrAttempt, BinOpAttempt, UOpAttempt, SubscriptAttempt
from safe_eval.default_rules import default_bin_rules, default_callable_rules, default_namespace, default_attr_rules
from safe_eval.errors import UnauthorizedNameAccess, UnauthorizedCall, UnauthorizedAttributeAccess, \
UnauthorizedEvalError, UnauthorizedSubscript
iterable_literals = {
List: list,
Tuple: tuple,
Set: set
}
comprehension_iterables = {
ListComp: list,
SetComp: set,
}
binary_ops = {
Add: op.add, Sub: op.sub, Mult: op.mul, MatMult: op.matmul, Div: op.truediv, Mod: op.mod, Pow: op.pow,
LShift: op.lshift, RShift: op.rshift, BitOr: op.or_, BitXor: op.xor, BitAnd: op.and_, FloorDiv: op.floordiv
}
unary_ops = {
Invert: op.inv, Not: op.not_, UAdd: op.pos, USub: op.neg
}
comparisons = {
Eq: op.eq, NotEq: op.ne, Lt: op.lt, LtE: op.le, Gt: op.gt, GtE: op.ge, Is: op.is_, IsNot: op.is_not,
In: lambda x, y: x in y, NotIn: lambda x, y: x not in y
}
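# Dispatch sketch: the evaluator maps AST operator node types through these
# tables, e.g. binary_ops[Add](2, 3) == 5, unary_ops[USub](7) == -7 and
# comparisons[In]("a", "abc") is True.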
T = TypeVar('T')
def _authorize(attempt: T, rules: Iterable[Rule[T]]):
for rule in rules:
ret = rule(attempt)
if ret is not None:
return ret
return False
class SafeEval:
def __init__(self):
self.call_rules: Deque[Rule[CallAttempt]] = deque(default_callable_rules)
self.getattr_rules: Deque[Rule[GetattrAttempt]] = deque(default_attr_rules)
self.binop_rules: Deque[Rule[BinOpAttempt]] = deque(default_bin_rules)
self.uop_rules: Deque[Rule[UOpAttempt]] = deque()
self.subscript_rules: Deque[Rule[SubscriptAttempt]] = deque()
self.static_vars: tDict[str, Any] = dict(default_namespace)
def call_authorized(self, attempt: CallAttempt):
return _authorize(attempt, self.call_rules)
def getattr_authorized(self, attempt: GetattrAttempt):
return _authorize(attempt, self.getattr_rules)
def binop_authorized(self, attempt: BinOpAttempt):
return _authorize(attempt, self.binop_rules)
def uop_authorized(self, attempt: UOpAttempt):
return _authorize(attempt, self.uop_rules)
def subscript_authorized(self, attempt: SubscriptAttempt):
return _authorize(attempt, self.subscript_rules)
def __call__(self, s: str):
evaluator = Evaluation(self, {})
return evaluator(s)
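# Illustrative usage sketch: whether a given expression is authorized depends
# entirely on the default rule sets imported above, which are not shown here.
def _demo_safe_eval():
    se = SafeEval()
    try:
        return se("1 + 2 * 3")  # plain arithmetic on constants
    except (UnauthorizedCall, UnauthorizedEvalError):
        return None  # a rule vetoed part of the evaluation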
class Evaluation:
def __init__(self, owner: SafeEval, free_vars: Mapping[str, Any]):
self.owner = owner
self.free_vars = free_vars
self.vars = ChainMap(self.free_vars, self.owner.static_vars)
def with_var(self, new_vars: Mapping[str, Any]):
return type(self)(self.owner, ChainMap(self.free_vars, new_vars))
def _assert_call_authorized(self, func, *args, **kwargs):
attempt = CallAttempt(func, args, kwargs)
if not self.owner.call_authorized(attempt):
raise UnauthorizedCall(attempt)
def _evaluators_from_generator(self, expr: Union[ListComp, SetComp, DictComp]):
iterator_stack = []
gen = expr.generators[0]
iterable = self._handle_expr(gen.iter)
self._assert_call_authorized(iter, iterable)
iterator_stack.append(iter(iterable))
variables = {}
sub_evaluation = self
while iterator_stack:
if len(iterator_stack) < len(expr.generators):
last_generator = expr.generators[len(iterator_stack) - 1]
last_iter = iterator_stack[-1]
self._assert_call_authorized(next, last_iter)
target_id = last_generator.target.id
try:
variables[target_id] = next(last_iter)
except StopIteration:
variables.pop(target_id, None)
iterator_stack.pop()
else:
sub_evaluation = self.with_var(variables)
                    # `if` conditions hang off the generator node, not off the
                    # comprehension expression itself
                    for condition in last_generator.ifs:
test = sub_evaluation._handle_expr(condition)
self._assert_call_authorized(bool, test)
if not test:
break
else:
next_gen = expr.generators[len(iterator_stack)]
iterable = sub_evaluation._handle_expr(next_gen.iter)
self._assert_call_authorized(iter, iterable)
iterator_stack.append(iter(iterable))
else:
last_generator = expr.generators[len(iterator_stack) - 1]
last_iter = iterator_stack[-1]
target_id = last_generator.target.id
self._assert_call_authorized(next, last_iter)
try:
variables[target_id] = next(last_iter)
except StopIteration:
variables.pop(target_id, None)
iterator_stack.pop()
else:
                    sub_evaluation = self.with_var(variables)
                    # honor `if` conditions on the innermost generator as well
                    for condition in last_generator.ifs:
                        test = sub_evaluation._handle_expr(condition)
                        self._assert_call_authorized(bool, test)
                        if not test:
                            break
                    else:
                        yield sub_evaluation
def _handle_expr(self, expr: expr):
if isinstance(expr, Constant):
return expr.value
if isinstance(expr, Name):
if expr.id not in self.vars:
raise UnauthorizedNameAccess(expr.id)
return self.vars[expr.id]
if isinstance(expr, Call):
func = self._handle_expr(expr.func)
args = []
for arg in expr.args:
if isinstance(arg, Starred):
v = self._handle_expr(arg.value)
args.extend(v)
else:
v = self._handle_expr(arg)
args.append(v)
kwargs = {}
for kw in expr.keywords:
v = self._handle_expr(kw.value)
if kw.arg is None:
# double starred
pre_len = len(kwargs)
kwargs.update(v)
if len(kwargs) != pre_len + len(v):
raise TypeError('multiple values for keyword arguments')
elif kw.arg in kwargs:
raise TypeError('multiple values for keyword arguments')
else:
kwargs[kw.arg] = v
attempt = CallAttempt(func, args, kwargs)
if not self.owner.call_authorized(attempt):
raise UnauthorizedCall(attempt)
return func(*args, **kwargs)
if isinstance(expr, BinOp):
operator = binary_ops[type(expr.op)]
left = self._handle_expr(expr.left)
right = self._handle_expr(expr.right)
attempt = BinOpAttempt(left, right, type(expr.op))
if not self.owner.binop_authorized(attempt):
raise UnauthorizedCall(attempt)
return operator(left, right)
if isinstance(expr, UnaryOp):
operator = unary_ops[type(expr.op)]
operand = self._handle_expr(expr.operand)
attempt = UOpAttempt(type(expr.op), operand)
if not self.owner.uop_authorized(attempt):
raise UnauthorizedCall(attempt)
return operator(operand)
if isinstance(expr, BoolOp):
if isinstance(expr.op, And):
for e in expr.values:
ret = self._handle_expr(e)
self._assert_call_authorized(bool, ret)
if not ret:
return ret
return ret
else:
# or
for e in expr.values:
ret = self._handle_expr(e)
self._assert_call_authorized(bool, ret)
if ret:
return ret
return ret
if isinstance(expr, Compare):
operands = [expr.left, *expr.comparators]
for i, (left, operator, right) in enumerate(zip(operands, expr.ops, operands[1:])):
left = self._handle_expr(left)
right = self._handle_expr(right)
operator_py = comparisons[type(operator)]
attempt = BinOpAttempt(left, right, type(operator))
if not self.owner.binop_authorized(attempt):
raise UnauthorizedCall(attempt)
ret = operator_py(left, right)
if i != len(expr.ops)-1:
self._assert_call_authorized(bool, ret)
if not ret:
return ret
return ret
if isinstance(expr, Attribute):
obj = self._handle_expr(expr.value)
attr = expr.attr
attempt = GetattrAttempt(obj, attr)
if not self.owner.getattr_authorized(attempt):
raise UnauthorizedAttributeAccess(attempt)
return getattr(obj, attr)
if isinstance(expr, Dict):
ret = {}
for k_expr, v_expr in zip(expr.keys, expr.values):
v = self._handle_expr(v_expr)
if k_expr is None:
for k in v.keys():
self._assert_call_authorized(hash, k)
ret.update(v)
else:
k = self._handle_expr(k_expr)
self._assert_call_authorized(hash, k)
ret[k] = v
return ret
if isinstance(expr, FormattedValue):
inner = self._handle_expr(expr.value)
if expr.conversion != -1:
conversion = {ord('r'): repr, ord('s'): str, ord('a'): ascii}[expr.conversion]
self._assert_call_authorized(conversion, inner)
inner = conversion(inner)
if expr.format_spec:
format_str = self._handle_expr(expr.format_spec)
self._assert_call_authorized(format, inner, format_str)
inner = format(inner, format_str)
if not isinstance(inner, str):
self._assert_call_authorized(str, inner)
inner = str(inner)
return inner
if isinstance(expr, JoinedStr):
return ''.join(self._handle_expr(v) for v in expr.values)
if isinstance(expr, IfExp):
cond = self._handle_expr(expr.test)
self._assert_call_authorized(bool, cond)
if cond:
return self._handle_expr(expr.body)
return self._handle_expr(expr.orelse)
if isinstance(expr, DictComp):
ret = {}
for ev in self._evaluators_from_generator(expr):
k = ev._handle_expr(expr.key)
self._assert_call_authorized(hash, k)
v = ev._handle_expr(expr.value)
ret[k] = v
return ret
if isinstance(expr, Subscript):
obj = self._handle_expr(expr.value)
args = self._handle_expr(expr.slice)
attempt = SubscriptAttempt(obj, args)
if not self.owner.subscript_authorized(attempt):
raise UnauthorizedSubscript(attempt)
return obj[args]
if isinstance(expr, (Lambda, GeneratorExp)):
raise UnauthorizedEvalError('lazy elements and lambdas are not allowed')
iterable_literal = iterable_literals.get(type(expr))
if iterable_literal:
ret = []
for elt in expr.elts:
if isinstance(elt, Starred):
v = self._handle_expr(elt.value)
ret.extend(v)
else:
v = self._handle_expr(elt)
ret.append(v)
if iterable_literal is set:
for r in ret:
self._assert_call_authorized(hash, r)
return iterable_literal(ret)
comprehension_iterable = comprehension_iterables.get(type(expr))
if comprehension_iterable:
ret = []
for ev in self._evaluators_from_generator(expr):
ret.append(ev._handle_expr(expr.elt))
if comprehension_iterable is set:
for r in ret:
self._assert_call_authorized(hash, r)
return comprehension_iterable(ret)
        raise TypeError('unhandled expression type: {}'.format(type(expr).__name__))
def __call__(self, arg: str):
mod = parse(arg).body
if len(mod) != 1:
raise ValueError(f'argument has {len(mod)} statements, expected 1')
return self._handle_expr(mod[0].value)
| 41.609524
| 116
| 0.572976
|
from __future__ import annotations
import operator as op
from _ast import Name, Call, Starred, Constant, BinOp, Compare, UnaryOp, BoolOp, Attribute, FormattedValue, \
JoinedStr, IfExp, Lambda, Subscript, \
ListComp, SetComp, GeneratorExp, \
DictComp, \
expr, \
List, Tuple, Set, \
Add, Sub, Mult, MatMult, Div, Mod, Pow, LShift, RShift, BitOr, BitXor, BitAnd, FloorDiv, \
Invert, Not, UAdd, USub, \
And, \
Eq, NotEq, Lt, LtE, Gt, GtE, Is, IsNot, In, NotIn, \
Dict
from ast import parse
from collections import ChainMap, deque
from typing import Any, Dict as tDict, Mapping, Deque, TypeVar, Union, Iterable
from safe_eval.attempt_model import Rule, CallAttempt, GetattrAttempt, BinOpAttempt, UOpAttempt, SubscriptAttempt
from safe_eval.default_rules import default_bin_rules, default_callable_rules, default_namespace, default_attr_rules
from safe_eval.errors import UnauthorizedNameAccess, UnauthorizedCall, UnauthorizedAttributeAccess, \
UnauthorizedEvalError, UnauthorizedSubscript
iterable_literals = {
List: list,
Tuple: tuple,
Set: set
}
comprehension_iterables = {
ListComp: list,
SetComp: set,
}
binary_ops = {
Add: op.add, Sub: op.sub, Mult: op.mul, MatMult: op.matmul, Div: op.truediv, Mod: op.mod, Pow: op.pow,
LShift: op.lshift, RShift: op.rshift, BitOr: op.or_, BitXor: op.xor, BitAnd: op.and_, FloorDiv: op.floordiv
}
unary_ops = {
Invert: op.inv, Not: op.not_, UAdd: op.pos, USub: op.neg
}
comparisons = {
Eq: op.eq, NotEq: op.ne, Lt: op.lt, LtE: op.le, Gt: op.gt, GtE: op.ge, Is: op.is_, IsNot: op.is_not,
In: lambda x, y: x in y, NotIn: lambda x, y: x not in y
}
T = TypeVar('T')
def _authorize(attempt: T, rules: Iterable[Rule[T]]):
for rule in rules:
ret = rule(attempt)
if ret is not None:
return ret
return False
class SafeEval:
def __init__(self):
self.call_rules: Deque[Rule[CallAttempt]] = deque(default_callable_rules)
self.getattr_rules: Deque[Rule[GetattrAttempt]] = deque(default_attr_rules)
self.binop_rules: Deque[Rule[BinOpAttempt]] = deque(default_bin_rules)
self.uop_rules: Deque[Rule[UOpAttempt]] = deque()
self.subscript_rules: Deque[Rule[SubscriptAttempt]] = deque()
self.static_vars: tDict[str, Any] = dict(default_namespace)
def call_authorized(self, attempt: CallAttempt):
return _authorize(attempt, self.call_rules)
def getattr_authorized(self, attempt: GetattrAttempt):
return _authorize(attempt, self.getattr_rules)
def binop_authorized(self, attempt: BinOpAttempt):
return _authorize(attempt, self.binop_rules)
def uop_authorized(self, attempt: UOpAttempt):
return _authorize(attempt, self.uop_rules)
def subscript_authorized(self, attempt: SubscriptAttempt):
return _authorize(attempt, self.subscript_rules)
def __call__(self, s: str):
evaluator = Evaluation(self, {})
return evaluator(s)
class Evaluation:
def __init__(self, owner: SafeEval, free_vars: Mapping[str, Any]):
self.owner = owner
self.free_vars = free_vars
self.vars = ChainMap(self.free_vars, self.owner.static_vars)
def with_var(self, new_vars: Mapping[str, Any]):
return type(self)(self.owner, ChainMap(self.free_vars, new_vars))
def _assert_call_authorized(self, func, *args, **kwargs):
attempt = CallAttempt(func, args, kwargs)
if not self.owner.call_authorized(attempt):
raise UnauthorizedCall(attempt)
def _evaluators_from_generator(self, expr: Union[ListComp, SetComp, DictComp]):
iterator_stack = []
gen = expr.generators[0]
iterable = self._handle_expr(gen.iter)
self._assert_call_authorized(iter, iterable)
iterator_stack.append(iter(iterable))
variables = {}
sub_evaluation = self
while iterator_stack:
if len(iterator_stack) < len(expr.generators):
last_generator = expr.generators[len(iterator_stack) - 1]
last_iter = iterator_stack[-1]
self._assert_call_authorized(next, last_iter)
target_id = last_generator.target.id
try:
variables[target_id] = next(last_iter)
except StopIteration:
variables.pop(target_id, None)
iterator_stack.pop()
else:
sub_evaluation = self.with_var(variables)
                    for condition in last_generator.ifs:
test = sub_evaluation._handle_expr(condition)
self._assert_call_authorized(bool, test)
if not test:
break
else:
next_gen = expr.generators[len(iterator_stack)]
iterable = sub_evaluation._handle_expr(next_gen.iter)
self._assert_call_authorized(iter, iterable)
iterator_stack.append(iter(iterable))
else:
last_generator = expr.generators[len(iterator_stack) - 1]
last_iter = iterator_stack[-1]
target_id = last_generator.target.id
self._assert_call_authorized(next, last_iter)
try:
variables[target_id] = next(last_iter)
except StopIteration:
variables.pop(target_id, None)
iterator_stack.pop()
else:
                    sub_evaluation = self.with_var(variables)
                    for condition in last_generator.ifs:
                        test = sub_evaluation._handle_expr(condition)
                        self._assert_call_authorized(bool, test)
                        if not test:
                            break
                    else:
                        yield sub_evaluation
def _handle_expr(self, expr: expr):
if isinstance(expr, Constant):
return expr.value
if isinstance(expr, Name):
if expr.id not in self.vars:
raise UnauthorizedNameAccess(expr.id)
return self.vars[expr.id]
if isinstance(expr, Call):
func = self._handle_expr(expr.func)
args = []
for arg in expr.args:
if isinstance(arg, Starred):
v = self._handle_expr(arg.value)
args.extend(v)
else:
v = self._handle_expr(arg)
args.append(v)
kwargs = {}
for kw in expr.keywords:
v = self._handle_expr(kw.value)
if kw.arg is None:
pre_len = len(kwargs)
kwargs.update(v)
if len(kwargs) != pre_len + len(v):
raise TypeError('multiple values for keyword arguments')
elif kw.arg in kwargs:
raise TypeError('multiple values for keyword arguments')
else:
kwargs[kw.arg] = v
attempt = CallAttempt(func, args, kwargs)
if not self.owner.call_authorized(attempt):
raise UnauthorizedCall(attempt)
return func(*args, **kwargs)
if isinstance(expr, BinOp):
operator = binary_ops[type(expr.op)]
left = self._handle_expr(expr.left)
right = self._handle_expr(expr.right)
attempt = BinOpAttempt(left, right, type(expr.op))
if not self.owner.binop_authorized(attempt):
raise UnauthorizedCall(attempt)
return operator(left, right)
if isinstance(expr, UnaryOp):
operator = unary_ops[type(expr.op)]
operand = self._handle_expr(expr.operand)
attempt = UOpAttempt(type(expr.op), operand)
if not self.owner.uop_authorized(attempt):
raise UnauthorizedCall(attempt)
return operator(operand)
if isinstance(expr, BoolOp):
if isinstance(expr.op, And):
for e in expr.values:
ret = self._handle_expr(e)
self._assert_call_authorized(bool, ret)
if not ret:
return ret
return ret
else:
for e in expr.values:
ret = self._handle_expr(e)
self._assert_call_authorized(bool, ret)
if ret:
return ret
return ret
if isinstance(expr, Compare):
operands = [expr.left, *expr.comparators]
for i, (left, operator, right) in enumerate(zip(operands, expr.ops, operands[1:])):
left = self._handle_expr(left)
right = self._handle_expr(right)
operator_py = comparisons[type(operator)]
attempt = BinOpAttempt(left, right, type(operator))
if not self.owner.binop_authorized(attempt):
raise UnauthorizedCall(attempt)
ret = operator_py(left, right)
if i != len(expr.ops)-1:
self._assert_call_authorized(bool, ret)
if not ret:
return ret
return ret
if isinstance(expr, Attribute):
obj = self._handle_expr(expr.value)
attr = expr.attr
attempt = GetattrAttempt(obj, attr)
if not self.owner.getattr_authorized(attempt):
raise UnauthorizedAttributeAccess(attempt)
return getattr(obj, attr)
if isinstance(expr, Dict):
ret = {}
for k_expr, v_expr in zip(expr.keys, expr.values):
v = self._handle_expr(v_expr)
if k_expr is None:
for k in v.keys():
self._assert_call_authorized(hash, k)
ret.update(v)
else:
k = self._handle_expr(k_expr)
self._assert_call_authorized(hash, k)
ret[k] = v
return ret
if isinstance(expr, FormattedValue):
inner = self._handle_expr(expr.value)
if expr.conversion != -1:
conversion = {ord('r'): repr, ord('s'): str, ord('a'): ascii}[expr.conversion]
self._assert_call_authorized(conversion, inner)
inner = conversion(inner)
if expr.format_spec:
format_str = self._handle_expr(expr.format_spec)
self._assert_call_authorized(format, inner, format_str)
inner = format(inner, format_str)
if not isinstance(inner, str):
self._assert_call_authorized(str, inner)
inner = str(inner)
return inner
if isinstance(expr, JoinedStr):
return ''.join(self._handle_expr(v) for v in expr.values)
if isinstance(expr, IfExp):
cond = self._handle_expr(expr.test)
self._assert_call_authorized(bool, cond)
if cond:
return self._handle_expr(expr.body)
return self._handle_expr(expr.orelse)
if isinstance(expr, DictComp):
ret = {}
for ev in self._evaluators_from_generator(expr):
k = ev._handle_expr(expr.key)
self._assert_call_authorized(hash, k)
v = ev._handle_expr(expr.value)
ret[k] = v
return ret
if isinstance(expr, Subscript):
obj = self._handle_expr(expr.value)
args = self._handle_expr(expr.slice)
attempt = SubscriptAttempt(obj, args)
if not self.owner.subscript_authorized(attempt):
raise UnauthorizedSubscript(attempt)
return obj[args]
if isinstance(expr, (Lambda, GeneratorExp)):
raise UnauthorizedEvalError('lazy elements and lambdas are not allowed')
iterable_literal = iterable_literals.get(type(expr))
if iterable_literal:
ret = []
for elt in expr.elts:
if isinstance(elt, Starred):
v = self._handle_expr(elt.value)
ret.extend(v)
else:
v = self._handle_expr(elt)
ret.append(v)
if iterable_literal is set:
for r in ret:
self._assert_call_authorized(hash, r)
return iterable_literal(ret)
comprehension_iterable = comprehension_iterables.get(type(expr))
if comprehension_iterable:
ret = []
for ev in self._evaluators_from_generator(expr):
ret.append(ev._handle_expr(expr.elt))
if comprehension_iterable is set:
for r in ret:
self._assert_call_authorized(hash, r)
return comprehension_iterable(ret)
        raise TypeError('unhandled expression type: {}'.format(type(expr).__name__))
def __call__(self, arg: str):
mod = parse(arg).body
if len(mod) != 1:
raise ValueError(f'argument has {len(mod)} statements, expected 1')
return self._handle_expr(mod[0].value)
| true
| true
|
1c41553ba4e6165690f9a5e5c27ea482b29ea8af
| 249
|
py
|
Python
|
transformers/data/processors/__init__.py
|
mandubian/transformers
|
0cb163865a4c761c226b151283309eedb2b1ca4d
|
[
"Apache-2.0"
] | 19
|
2019-12-11T02:16:47.000Z
|
2021-10-04T05:11:22.000Z
|
transformers/data/processors/__init__.py
|
mandubian/transformers
|
0cb163865a4c761c226b151283309eedb2b1ca4d
|
[
"Apache-2.0"
] | 9
|
2020-09-25T20:49:41.000Z
|
2022-02-10T00:32:07.000Z
|
transformers/data/processors/__init__.py
|
mandubian/transformers
|
0cb163865a4c761c226b151283309eedb2b1ca4d
|
[
"Apache-2.0"
] | 5
|
2020-02-10T07:16:54.000Z
|
2021-06-16T19:11:12.000Z
|
from .utils import InputExample, InputFeatures, DataProcessor
from .glue import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
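# Usage sketch: these re-exports let callers write, e.g.,
# `from transformers.data.processors import glue_processors` and then
# `glue_processors["mrpc"]()` without spelling out the submodule paths
# (the task key is illustrative).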
| 62.25
| 110
| 0.883534
|
from .utils import InputExample, InputFeatures, DataProcessor
from .glue import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| true
| true
|
1c41575a029ee3631339212c2f2268f57c9ea296
| 1,681
|
py
|
Python
|
logging_config.py
|
Vinz87/DSNMonitor
|
0c4a8d84f5adf99394963a75701cdd59ce9bd073
|
[
"MIT"
] | 2
|
2022-01-06T17:52:16.000Z
|
2022-01-06T23:21:46.000Z
|
logging_config.py
|
Vinz87/DSNMonitor
|
0c4a8d84f5adf99394963a75701cdd59ce9bd073
|
[
"MIT"
] | null | null | null |
logging_config.py
|
Vinz87/DSNMonitor
|
0c4a8d84f5adf99394963a75701cdd59ce9bd073
|
[
"MIT"
] | null | null | null |
import os
import sys
import pwd
import grp
import requests
import logging
import logging.handlers
# Disable requests warnings
requests.packages.urllib3.disable_warnings()
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("requests").addHandler(logging.NullHandler())
# Create log folder
parent_folder = os.path.dirname(os.path.realpath(sys.argv[0])) + "/"
if not os.path.exists(parent_folder + "logs/"):
os.makedirs(parent_folder + "logs/")
# Set log file name and permissions
log_filename = parent_folder + "logs/" + os.path.basename(os.path.normpath(parent_folder)) + ".log"
try:
    file = open(log_filename, "r")
except IOError:
    file = open(log_filename, "w")
file.close()  # the handle was only needed to make sure the file exists
if pwd.getpwuid(os.stat(log_filename).st_uid).pw_name != "pi":
uid = pwd.getpwnam("pi").pw_uid
gid = grp.getgrnam("staff").gr_gid
os.chown(log_filename, uid, gid)
# File logger
file_logger = logging.getLogger("my_file_logger")
file_logger.setLevel(logging.DEBUG)
file_logger.propagate = 0
file_formatter = logging.Formatter("%(asctime)s\t%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
file_handler = logging.handlers.TimedRotatingFileHandler(log_filename, when="midnight", backupCount=7)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(file_formatter)
file_logger.addHandler(file_handler)
# Console logger
console_logger = logging.getLogger("my_console_logger")
console_logger.setLevel(logging.DEBUG)
console_logger.propagate = 0
console_formatter = logging.Formatter("%(asctime)s\t%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(console_formatter)
console_logger.addHandler(console_handler)
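# Usage sketch (hypothetical messages): both loggers share the timestamped
# format; the file handler rotates at midnight keeping 7 backups, while the
# console handler prints at DEBUG and above to stdout.
# file_logger.info("measurement stored")
# console_logger.debug("verbose output, console only")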
| 35.765957
| 102
| 0.775729
|
import os
import sys
import pwd
import grp
import requests
import logging
import logging.handlers
requests.packages.urllib3.disable_warnings()
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("requests").addHandler(logging.NullHandler())
parent_folder = os.path.dirname(os.path.realpath(sys.argv[0])) + "/"
if not os.path.exists(parent_folder + "logs/"):
os.makedirs(parent_folder + "logs/")
log_filename = parent_folder + "logs/" + os.path.basename(os.path.normpath(parent_folder)) + ".log"
try:
    file = open(log_filename, "r")
except IOError:
    file = open(log_filename, "w")
file.close()
if pwd.getpwuid(os.stat(log_filename).st_uid).pw_name != "pi":
uid = pwd.getpwnam("pi").pw_uid
gid = grp.getgrnam("staff").gr_gid
os.chown(log_filename, uid, gid)
file_logger = logging.getLogger("my_file_logger")
file_logger.setLevel(logging.DEBUG)
file_logger.propagate = 0
file_formatter = logging.Formatter("%(asctime)s\t%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
file_handler = logging.handlers.TimedRotatingFileHandler(log_filename, when="midnight", backupCount=7)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(file_formatter)
file_logger.addHandler(file_handler)
console_logger = logging.getLogger("my_console_logger")
console_logger.setLevel(logging.DEBUG)
console_logger.propagate = 0
console_formatter = logging.Formatter("%(asctime)s\t%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(console_formatter)
console_logger.addHandler(console_handler)
| true
| true
|
1c4158d92d4823278228983279c764c40df5def5
| 146,387
|
py
|
Python
|
src/sage/rings/asymptotic/asymptotic_ring.py
|
robertwb/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | 2
|
2018-06-30T01:37:35.000Z
|
2018-06-30T01:37:39.000Z
|
src/sage/rings/asymptotic/asymptotic_ring.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/rings/asymptotic/asymptotic_ring.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
r"""
Asymptotic Ring
This module provides a ring (called :class:`AsymptoticRing`) for
computations with :wikipedia:`asymptotic expansions <Asymptotic_expansion>`.
.. _asymptotic_ring_definition:
(Informal) Definition
=====================
An asymptotic expansion is a sum such as
.. MATH::
5z^3 + 4z^2 + O(z)
as `z \to \infty` or
.. MATH::
3x^{42}y^2 + 7x^3y^3 + O(x^2) + O(y)
as `x` and `y` tend to `\infty`. It is a truncated series (after a
finite number of terms), which approximates a function.
The summands of the asymptotic expansions are partially ordered. In
this module these summands are the following:
- Exact terms `c\cdot g` with a coefficient `c` and an element `g` of
a growth group (:ref:`see below <asymptotic_ring_growth>`).
- `O`-terms `O(g)` (see :wikipedia:`Big O notation <Big_O_notation>`;
also called *Bachmann--Landau notation*) for a growth group
element `g` (:ref:`again see below <asymptotic_ring_growth>`).
See
:wikipedia:`the Wikipedia article on asymptotic expansions <Asymptotic_expansion>`
for more details.
Further examples of such elements can be found :ref:`here <asymptotic_ring_intro>`.
.. _asymptotic_ring_growth:
Growth Groups and Elements
--------------------------
The elements of a :doc:`growth group <growth_group>` are equipped with
a partial order and usually contain a variable. Examples---the order
is described below these examples---are
- elements of the form `z^q` for some integer or rational `q`
(growth groups with :ref:`description strings <growth_group_description>`
``z^ZZ`` or ``z^QQ``),
- elements of the form `\log(z)^q` for some integer or rational `q`
(growth groups ``log(z)^ZZ`` or ``log(z)^QQ``),
- elements of the form `a^z` for some
rational `a` (growth group ``QQ^z``), or
- more sophisticated constructions like products
`x^r \cdot \log(x)^s \cdot a^y \cdot y^q`
(this corresponds to an element of the growth group
``x^QQ * log(x)^ZZ * QQ^y * y^QQ``).
The order in all these examples is induced by the magnitude of the
elements as `x`, `y`, or `z` (independently) tend to `\infty`. For
elements only using the variable `z` this means that `g_1 \leq g_2` if
.. MATH::
\lim_{z\to\infty} \frac{g_1}{g_2} \leq 1.
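For example, `z \leq z^2` holds, since `\lim_{z\to\infty} z/z^2 = 0 \leq 1`.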
.. NOTE::
Asymptotic rings where the variable tend to some value distinct from
`\infty` are not yet implemented.
To find out more about
- growth groups,
- on how they are created and
- about the above used *descriptions strings*
see the top of the module :doc:`growth group <growth_group>`.
.. WARNING::
As this code is experimental, a warning is thrown when an
asymptotic ring (or an associated structure) is created for the
first time in a session (see
:class:`sage.misc.superseded.experimental`).
TESTS::
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: G = GrowthGroup('x^ZZ')
doctest:...: FutureWarning: This class/method/function is marked as
experimental. It, its functionality or its interface might change
without a formal deprecation.
See http://trac.sagemath.org/17601 for details.
.. _asymptotic_ring_intro:
Introductory Examples
=====================
We start this series of examples by defining two asymptotic rings.
Two Rings
---------
A Univariate Asymptotic Ring
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
First, we construct the following (very simple) asymptotic ring in the variable `z`::
sage: A.<z> = AsymptoticRing(growth_group='z^QQ', coefficient_ring=ZZ); A
Asymptotic Ring <z^QQ> over Integer Ring
A typical element of this ring is
::
sage: A.an_element()
z^(3/2) + O(z^(1/2))
This element consists of two summands: the exact term with coefficient
`1` and growth `z^{3/2}` and the `O`-term `O(z^{1/2})`. Note that the
growth of `z^{3/2}` is larger than the growth of `z^{1/2}` as
`z\to\infty`, thus this expansion cannot be simplified (which would
be done automatically, see below).
Elements can be constructed via the generator `z` and the function
:func:`~sage.rings.big_oh.O`, for example
::
sage: 4*z^2 + O(z)
4*z^2 + O(z)
A Multivariate Asymptotic Ring
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Next, we construct a more sophisticated asymptotic ring in the
variables `x` and `y` by
::
sage: B.<x, y> = AsymptoticRing(growth_group='x^QQ * log(x)^ZZ * QQ^y * y^QQ', coefficient_ring=QQ); B
Asymptotic Ring <x^QQ * log(x)^ZZ * QQ^y * y^QQ> over Rational Field
Again, we can look at a typical (nontrivial) element::
sage: B.an_element()
1/8*x^(3/2)*log(x)^3*(1/8)^y*y^(3/2) + O(x^(1/2)*log(x)*(1/2)^y*y^(1/2))
Again, elements can be created using the generators `x` and `y`, as well as
the function :func:`~sage.rings.big_oh.O`::
sage: log(x)*y/42 + O(1/2^y)
1/42*log(x)*y + O((1/2)^y)
Arithmetical Operations
-----------------------
In this section we explain how to perform various arithmetical
operations with the elements of the asymptotic rings constructed
above.
The Ring Operations Plus and Times
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We start our calculations in the ring
::
sage: A
Asymptotic Ring <z^QQ> over Integer Ring
Of course, we can perform the usual ring operations `+` and `*`::
sage: z^2 + 3*z*(1-z)
-2*z^2 + 3*z
sage: (3*z + 2)^3
27*z^3 + 54*z^2 + 36*z + 8
In addition to that, special powers---our growth group ``z^QQ`` allows
the exponents to be taken from `\QQ`---can also be computed::
sage: (z^(5/2)+z^(1/7)) * z^(-1/5)
z^(23/10) + z^(-2/35)
The central concepts of computations with asymptotic expansions is
that the `O`-notation can be used. For example, we have
::
sage: z^3 + z^2 + z + O(z^2)
z^3 + O(z^2)
where the result is simplified automatically. A more sophisticated example is
::
sage: (z+2*z^2+3*z^3+4*z^4) * (O(z)+z^2)
4*z^6 + O(z^5)
Division
^^^^^^^^
The asymptotic expansions support division. For example, we can
expand `1/(z-1)` to a geometric series::
sage: 1 / (z-1)
z^(-1) + z^(-2) + z^(-3) + z^(-4) + ... + z^(-20) + O(z^(-21))
A default precision (parameter ``default_prec`` of
:class:`AsymptoticRing`) is predefined. Thus, only the first `20`
summands are calculated. However, if we only want the first `5` exact
terms, we cut off the rest by using
::
sage: (1 / (z-1)).truncate(5)
z^(-1) + z^(-2) + z^(-3) + z^(-4) + z^(-5) + O(z^(-6))
or
::
sage: 1 / (z-1) + O(z^(-6))
z^(-1) + z^(-2) + z^(-3) + z^(-4) + z^(-5) + O(z^(-6))
Of course, we can work with more complicated expansions as well::
sage: (4*z+1) / (z^3+z^2+z+O(z^0))
4*z^(-2) - 3*z^(-3) - z^(-4) + O(z^(-5))
Not all elements are invertible, for instance,
::
sage: 1 / O(z)
Traceback (most recent call last):
...
ZeroDivisionError: Cannot invert O(z).
is not invertible, since it includes `0`.
Powers, Exponentials and Logarithms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is as simple as it can be; just use the usual operators ``^``,
``exp`` and ``log``. For example, we obtain the usual series expansion
of the logarithm
::
sage: -log(1-1/z)
z^(-1) + 1/2*z^(-2) + 1/3*z^(-3) + ... + O(z^(-21))
as `z \to \infty`.
Similarly, we can apply the exponential function of an asymptotic expansion::
sage: exp(1/z)
1 + z^(-1) + 1/2*z^(-2) + 1/6*z^(-3) + 1/24*z^(-4) + ... + O(z^(-20))
Arbitrary powers work as well; for example, we have
::
sage: (1 + 1/z + O(1/z^5))^(1 + 1/z)
1 + z^(-1) + z^(-2) + 1/2*z^(-3) + 1/3*z^(-4) + O(z^(-5))
.. NOTE::
In the asymptotic ring
::
sage: M.<n> = AsymptoticRing(growth_group='QQ^n * n^QQ', coefficient_ring=ZZ)
the operation
::
sage: (1/2)^n
Traceback (most recent call last):
...
ValueError: 1/2 is not in Exact Term Monoid QQ^n * n^QQ
with coefficients in Integer Ring. ...
fails, since the rational `1/2` is not contained in `M`. You can use
::
sage: n.rpow(1/2)
(1/2)^n
instead. (See also the examples in
:meth:`ExactTerm.rpow() <sage.rings.asymptotic.term_monoid.ExactTerm.rpow>`
for a detailed explanation.)
Another way is to use a larger coefficient ring::
sage: M_QQ.<n> = AsymptoticRing(growth_group='QQ^n * n^QQ', coefficient_ring=QQ)
sage: (1/2)^n
(1/2)^n
Multivariate Arithmetic
^^^^^^^^^^^^^^^^^^^^^^^
Now let us move on to arithmetic in the multivariate ring
::
sage: B
Asymptotic Ring <x^QQ * log(x)^ZZ * QQ^y * y^QQ> over Rational Field
.. TODO::
write this part
More Examples
=============
The mathematical constant e as a limit
--------------------------------------
The base of the natural logarithm `e` satisfies the equation
.. MATH::
e = \lim_{n\to\infty} \left(1+\frac{1}{n}\right)^n
By using asymptotic expansions, we obtain the more precise result
::
sage: E.<n> = AsymptoticRing(growth_group='n^ZZ', coefficient_ring=SR, default_prec=5); E
Asymptotic Ring <n^ZZ> over Symbolic Ring
sage: (1 + 1/n)^n
e - 1/2*e*n^(-1) + 11/24*e*n^(-2) - 7/16*e*n^(-3) + 2447/5760*e*n^(-4) + O(n^(-5))
Selected Technical Details
==========================
Coercions and Functorial Constructions
--------------------------------------
The :class:`AsymptoticRing` fully supports
`coercion <../../../../coercion/index.html>`_. For example, the coefficient ring is automatically extended when needed::
sage: A
Asymptotic Ring <z^QQ> over Integer Ring
sage: (z + 1/2).parent()
Asymptotic Ring <z^QQ> over Rational Field
Here, the coefficient ring was extended to allow `1/2` as a
coefficient. Another example is
::
sage: C.<c> = AsymptoticRing(growth_group='c^ZZ', coefficient_ring=ZZ['e'])
sage: C.an_element()
e^3*c^3 + O(c)
sage: C.an_element() / 7
1/7*e^3*c^3 + O(c)
Here the result's coefficient ring is the newly found
::
sage: (C.an_element() / 7).parent()
Asymptotic Ring <c^ZZ> over
Univariate Polynomial Ring in e over Rational Field
Not only the coefficient ring can be extended, but the growth group as
well. For example, we can add/multiply elements of the asymptotic
rings ``A`` and ``C`` to get an expansion in a new asymptotic ring::
sage: r = c*z + c/2 + O(z); r
c*z + 1/2*c + O(z)
sage: r.parent()
Asymptotic Ring <c^ZZ * z^QQ> over
Univariate Polynomial Ring in e over Rational Field
Data Structures
---------------
The summands of an
:class:`asymptotic expansion <AsymptoticExpansion>` are wrapped
:doc:`growth group elements <growth_group>`.
This wrapping is done by the
:doc:`term monoid module <term_monoid>`.
However, inside an
:class:`asymptotic expansion <AsymptoticExpansion>` these summands
(terms) are stored together with their growth-relationship, i.e., each
summand knows its direct predecessors and successors. As a data
structure a special poset (namely a
:mod:`mutable poset <sage.data_structures.mutable_poset>`)
is used. We can have a look at this::
sage: b = x^3*y + x^2*y + x*y^2 + O(x) + O(y)
sage: print(b.summands.repr_full(reverse=True))
poset(x*y^2, x^3*y, x^2*y, O(x), O(y))
+-- oo
| +-- no successors
| +-- predecessors: x*y^2, x^3*y
+-- x*y^2
| +-- successors: oo
| +-- predecessors: O(x), O(y)
+-- x^3*y
| +-- successors: oo
| +-- predecessors: x^2*y
+-- x^2*y
| +-- successors: x^3*y
| +-- predecessors: O(x), O(y)
+-- O(x)
| +-- successors: x*y^2, x^2*y
| +-- predecessors: null
+-- O(y)
| +-- successors: x*y^2, x^2*y
| +-- predecessors: null
+-- null
| +-- successors: O(x), O(y)
| +-- no predecessors
Various
=======
AUTHORS:
- Benjamin Hackl (2015)
- Daniel Krenn (2015)
- Clemens Heuberger (2016)
ACKNOWLEDGEMENT:
- Benjamin Hackl, Clemens Heuberger and Daniel Krenn are supported by the
Austrian Science Fund (FWF): P 24644-N26.
- Benjamin Hackl is supported by the Google Summer of Code 2015.
Classes and Methods
===================
"""
# *****************************************************************************
# Copyright (C) 2015 Benjamin Hackl <benjamin.hackl@aau.at>
# 2015 Daniel Krenn <dev@danielkrenn.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
# *****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
from sage.rings.ring import Algebra
from sage.structure.element import CommutativeAlgebraElement
from sage.structure.unique_representation import UniqueRepresentation
from sage.misc.defaults import series_precision
from sage.misc.superseded import experimental
from sage.rings.all import RIF
class NoConvergenceError(RuntimeError):
r"""
A special :python:`RuntimeError<library/exceptions.html#exceptions.RuntimeError>`
which is raised when an algorithm does not converge/stop.
"""
pass
class AsymptoticExpansion(CommutativeAlgebraElement):
r"""
Class for asymptotic expansions, i.e., the elements of an
:class:`AsymptoticRing`.
INPUT:
- ``parent`` -- the parent of the asymptotic expansion.
- ``summands`` -- the summands as a
:class:`~sage.data_structures.mutable_poset.MutablePoset`, which
represents the underlying structure.
- ``simplify`` -- a boolean (default: ``True``). It controls
automatic simplification (absorption) of the asymptotic expansion.
- ``convert`` -- a boolean (default: ``True``). If set, then the
``summands`` are converted to the asymptotic ring (the parent of this
expansion). If not, then the summands are taken as they are. In
that case, the caller must ensure that the parent of the terms is
set correctly.
EXAMPLES:
There are several ways to create asymptotic expansions; usually
this is done by using the corresponding :class:`asymptotic rings <AsymptoticRing>`::
sage: R_x.<x> = AsymptoticRing(growth_group='x^QQ', coefficient_ring=QQ); R_x
Asymptotic Ring <x^QQ> over Rational Field
sage: R_y.<y> = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=ZZ); R_y
Asymptotic Ring <y^ZZ> over Integer Ring
At this point, `x` and `y` are already asymptotic expansions::
sage: type(x)
<class 'sage.rings.asymptotic.asymptotic_ring.AsymptoticRing_with_category.element_class'>
The usual ring operations can be performed; moreover, rational
exponents are allowed (growth group ``x^QQ``)::
sage: x^2 + 3*(x - x^(2/5))
x^2 + 3*x - 3*x^(2/5)
sage: (3*x^(1/3) + 2)^3
27*x + 54*x^(2/3) + 36*x^(1/3) + 8
One of the central ideas behind computing with asymptotic
expansions is that the `O`-notation (see
:wikipedia:`Big_O_notation`) can be used. For example, we have::
sage: (x+2*x^2+3*x^3+4*x^4) * (O(x)+x^2)
4*x^6 + O(x^5)
In particular, :func:`~sage.rings.big_oh.O` can be used to
construct the asymptotic expansions. With the help of the
:meth:`summands`, we can also have a look at the inner structure
of an asymptotic expansion::
sage: expr1 = x + 2*x^2 + 3*x^3 + 4*x^4; expr2 = O(x) + x^2
sage: print(expr1.summands.repr_full())
poset(x, 2*x^2, 3*x^3, 4*x^4)
+-- null
| +-- no predecessors
| +-- successors: x
+-- x
| +-- predecessors: null
| +-- successors: 2*x^2
+-- 2*x^2
| +-- predecessors: x
| +-- successors: 3*x^3
+-- 3*x^3
| +-- predecessors: 2*x^2
| +-- successors: 4*x^4
+-- 4*x^4
| +-- predecessors: 3*x^3
| +-- successors: oo
+-- oo
| +-- predecessors: 4*x^4
| +-- no successors
sage: print(expr2.summands.repr_full())
poset(O(x), x^2)
+-- null
| +-- no predecessors
| +-- successors: O(x)
+-- O(x)
| +-- predecessors: null
| +-- successors: x^2
+-- x^2
| +-- predecessors: O(x)
| +-- successors: oo
+-- oo
| +-- predecessors: x^2
| +-- no successors
sage: print((expr1 * expr2).summands.repr_full())
poset(O(x^5), 4*x^6)
+-- null
| +-- no predecessors
| +-- successors: O(x^5)
+-- O(x^5)
| +-- predecessors: null
| +-- successors: 4*x^6
+-- 4*x^6
| +-- predecessors: O(x^5)
| +-- successors: oo
+-- oo
| +-- predecessors: 4*x^6
| +-- no successors
In addition to the monomial growth elements from above, we can
also compute with logarithmic terms (simply by constructing the
appropriate growth group)::
sage: R_log = AsymptoticRing(growth_group='log(x)^QQ', coefficient_ring=QQ)
sage: lx = R_log(log(SR.var('x')))
sage: (O(lx) + lx^3)^4
log(x)^12 + O(log(x)^10)
.. SEEALSO::
:doc:`growth_group`,
:doc:`term_monoid`,
:mod:`~sage.data_structures.mutable_poset`.
"""
def __init__(self, parent, summands, simplify=True, convert=True):
r"""
See :class:`AsymptoticExpansion` for more information.
TESTS::
sage: R_x.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: R_y.<y> = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=ZZ)
sage: R_x is R_y
False
sage: ex1 = x + 2*x^2 + 3*x^3 + 4*x^4 + 5*x^5
sage: ex2 = x + O(R_x(1))
sage: ex1 * ex2
5*x^6 + O(x^5)
::
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: from sage.rings.asymptotic.term_monoid import TermMonoid
sage: G = GrowthGroup('x^ZZ'); x = G.gen()
sage: OT = TermMonoid('O', G, ZZ); ET = TermMonoid('exact', G, ZZ)
sage: R = AsymptoticRing(G, ZZ)
sage: lst = [ET(x, 1), ET(x^2, 2), OT(x^3), ET(x^4, 4)]
sage: expr = R(lst, simplify=False); expr # indirect doctest
4*x^4 + O(x^3) + 2*x^2 + x
sage: print(expr.summands.repr_full())
poset(x, 2*x^2, O(x^3), 4*x^4)
+-- null
| +-- no predecessors
| +-- successors: x
+-- x
| +-- predecessors: null
| +-- successors: 2*x^2
+-- 2*x^2
| +-- predecessors: x
| +-- successors: O(x^3)
+-- O(x^3)
| +-- predecessors: 2*x^2
| +-- successors: 4*x^4
+-- 4*x^4
| +-- predecessors: O(x^3)
| +-- successors: oo
+-- oo
| +-- predecessors: 4*x^4
| +-- no successors
sage: expr._simplify_(); expr
4*x^4 + O(x^3)
sage: print(expr.summands.repr_full())
poset(O(x^3), 4*x^4)
+-- null
| +-- no predecessors
| +-- successors: O(x^3)
+-- O(x^3)
| +-- predecessors: null
| +-- successors: 4*x^4
+-- 4*x^4
| +-- predecessors: O(x^3)
| +-- successors: oo
+-- oo
| +-- predecessors: 4*x^4
| +-- no successors
sage: R(lst, simplify=True) # indirect doctest
4*x^4 + O(x^3)
::
sage: R.<x> = AsymptoticRing(growth_group='x^QQ', coefficient_ring=QQ)
sage: e = R(x^2 + O(x))
sage: from sage.rings.asymptotic.asymptotic_ring import AsymptoticExpansion
sage: S = AsymptoticRing(growth_group='x^QQ', coefficient_ring=ZZ)
sage: for s in AsymptoticExpansion(S, e.summands).summands.elements_topological():
....: print(s.parent())
O-Term Monoid x^QQ with implicit coefficients in Integer Ring
Exact Term Monoid x^QQ with coefficients in Integer Ring
sage: for s in AsymptoticExpansion(S, e.summands,
....: convert=False).summands.elements_topological():
....: print(s.parent())
O-Term Monoid x^QQ with implicit coefficients in Rational Field
Exact Term Monoid x^QQ with coefficients in Rational Field
::
sage: AsymptoticExpansion(S, R(1/2).summands)
Traceback (most recent call last):
...
ValueError: Cannot include 1/2 with parent
Exact Term Monoid x^QQ with coefficients in Rational Field in
Asymptotic Ring <x^QQ> over Integer Ring
> *previous* ValueError: 1/2 is not a coefficient in
Exact Term Monoid x^QQ with coefficients in Integer Ring.
Check :trac:`19921`::
sage: CR.<Z> = QQ['Z']
sage: CR_mod = CR.quotient((Z^2 - 1)*CR)
sage: R.<x> = AsymptoticRing(growth_group='x^NN', coefficient_ring=CR)
sage: R_mod = R.change_parameter(coefficient_ring=CR_mod)
sage: e = 1 + x*(Z^2-1)
sage: R_mod(e)
1
Check that :trac:`19999` is resolved::
sage: A.<x> = AsymptoticRing('QQ^x * x^QQ', QQ)
sage: 1 + (-1)^x + 2^x + (-2)^x
(-2)^x + 2^x + (-1)^x + 1
"""
super(AsymptoticExpansion, self).__init__(parent=parent)
from sage.data_structures.mutable_poset import MutablePoset
if not isinstance(summands, MutablePoset):
raise TypeError('Summands %s are not in a mutable poset as expected '
'when creating an element of %s.' % (summands, parent))
if convert:
from .misc import combine_exceptions
from .term_monoid import TermMonoid, ZeroCoefficientError
def convert_terms(element):
T = TermMonoid(term_monoid=element.parent(), asymptotic_ring=parent)
try:
return T(element)
except ZeroCoefficientError:
return None
except (ValueError, TypeError) as e:
raise combine_exceptions(
ValueError('Cannot include %s with parent %s in %s' %
(element, element.parent(), parent)), e)
new_summands = summands.copy()
new_summands.map(convert_terms, topological=True, reverse=True)
self._summands_ = new_summands
else:
self._summands_ = summands
if simplify:
self._simplify_()
@property
def summands(self):
r"""
The summands of this asymptotic expansion stored in the
underlying data structure (a
:class:`~sage.data_structures.mutable_poset.MutablePoset`).
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: expr = 7*x^12 + x^5 + O(x^3)
sage: expr.summands
poset(O(x^3), x^5, 7*x^12)
.. SEEALSO::
:class:`sage.data_structures.mutable_poset.MutablePoset`
"""
return self._summands_
def __hash__(self):
r"""
A hash value for this element.
.. WARNING::
This hash value uses the string representation and might not
always be correct.
TESTS::
sage: R_log = AsymptoticRing(growth_group='log(x)^QQ', coefficient_ring=QQ)
sage: lx = R_log(log(SR.var('x')))
sage: elt = (O(lx) + lx^3)^4
sage: hash(elt) # random
-4395085054568712393
"""
return hash(str(self))
def __nonzero__(self):
r"""
Return whether this asymptotic expansion is not identically zero.
INPUT:
Nothing.
OUTPUT:
A boolean.
TESTS::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: bool(R(0)) # indirect doctest
False
sage: bool(x) # indirect doctest
True
sage: bool(7*x^12 + x^5 + O(x^3)) # indirect doctest
True
"""
return bool(self._summands_)
def __eq__(self, other):
r"""
Return whether this asymptotic expansion is equal to ``other``.
INPUT:
- ``other`` -- an object.
OUTPUT:
A boolean.
.. NOTE::
This function uses the coercion model to find a common
parent for the two operands.
EXAMPLES::
sage: R.<x> = AsymptoticRing('x^ZZ', QQ)
sage: (1 + 2*x + 3*x^2) == (3*x^2 + 2*x + 1) # indirect doctest
True
sage: O(x) == O(x)
False
TESTS::
sage: x == None
False
::
sage: x == 'x'
False
"""
if other is None:
return False
try:
return not bool(self - other)
except (TypeError, ValueError):
return False
def __ne__(self, other):
r"""
Return whether this asymptotic expansion is not equal to ``other``.
INPUT:
- ``other`` -- an object.
OUTPUT:
A boolean.
.. NOTE::
This function uses the coercion model to find a common
parent for the two operands.
EXAMPLES::
sage: R.<x> = AsymptoticRing('x^ZZ', QQ)
sage: (1 + 2*x + 3*x^2) != (3*x^2 + 2*x + 1) # indirect doctest
False
sage: O(x) != O(x)
True
TESTS::
sage: x != None
True
"""
return not self == other
def has_same_summands(self, other):
r"""
Return whether this asymptotic expansion and ``other`` have the
same summands.
INPUT:
- ``other`` -- an asymptotic expansion.
OUTPUT:
A boolean.
.. NOTE::
While for example ``O(x) == O(x)`` yields ``False``,
these expansions *do* have the same summands and this method
returns ``True``.
Moreover, this method uses the coercion model in order to
find a common parent for this asymptotic expansion and
``other``.
EXAMPLES::
sage: R_ZZ.<x_ZZ> = AsymptoticRing('x^ZZ', ZZ)
sage: R_QQ.<x_QQ> = AsymptoticRing('x^ZZ', QQ)
sage: sum(x_ZZ^k for k in range(5)) == sum(x_QQ^k for k in range(5)) # indirect doctest
True
sage: O(x_ZZ) == O(x_QQ)
False
TESTS::
sage: x_ZZ.has_same_summands(None)
False
"""
if other is None:
return False
from sage.structure.element import have_same_parent
if have_same_parent(self, other):
return self._has_same_summands_(other)
from sage.structure.element import get_coercion_model
return get_coercion_model().bin_op(self, other,
lambda self, other:
self._has_same_summands_(other))
def _has_same_summands_(self, other):
r"""
Return whether this :class:`AsymptoticExpansion` has the same
summands as ``other``.
INPUT:
- ``other`` -- an :class:`AsymptoticExpansion`.
OUTPUT:
A boolean.
.. NOTE::
This method compares two :class:`AsymptoticExpansion`
with the same parent.
EXAMPLES::
sage: R.<x> = AsymptoticRing('x^ZZ', QQ)
sage: O(x).has_same_summands(O(x))
True
sage: (1 + x + 2*x^2).has_same_summands(2*x^2 + O(x)) # indirect doctest
False
"""
if len(self.summands) != len(other.summands):
return False
from builtins import zip
return all(s == o for s, o in
zip(self.summands.elements_topological(),
other.summands.elements_topological()))
def _simplify_(self):
r"""
Simplify this asymptotic expansion.
INPUT:
Nothing.
OUTPUT:
Nothing, but modifies this asymptotic expansion.
.. NOTE::
This method is usually called during initialization of
this asymptotic expansion.
.. NOTE::
This asymptotic expansion is simplified by letting
`O`-terms that are included in this expansion absorb all
terms with smaller growth.
TESTS::
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: from sage.rings.asymptotic.term_monoid import TermMonoid
sage: G = GrowthGroup('x^ZZ')
sage: OT = TermMonoid('O', G, ZZ); ET = TermMonoid('exact', G, ZZ)
sage: R = AsymptoticRing(G, ZZ)
sage: lst = [ET(x, 1), ET(x^2, 2), OT(x^3), ET(x^4, 4)]
sage: expr = R(lst, simplify=False); expr # indirect doctest
4*x^4 + O(x^3) + 2*x^2 + x
sage: expr._simplify_(); expr
4*x^4 + O(x^3)
sage: R(lst) # indirect doctest
4*x^4 + O(x^3)
"""
self._summands_.merge(reverse=True)
def _repr_(self, latex=False):
r"""
A representation string for this asymptotic expansion.
INPUT:
- ``latex`` -- (default: ``False``) a boolean. If set, then
LaTeX-output is returned.
OUTPUT:
A string.
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: (5*x^2+12*x) * (x^3+O(x)) # indirect doctest
5*x^5 + 12*x^4 + O(x^3)
sage: (5*x^2-12*x) * (x^3+O(x)) # indirect doctest
5*x^5 - 12*x^4 + O(x^3)
"""
if latex:
from sage.misc.latex import latex as latex_repr
f = latex_repr
else:
f = repr
s = ' + '.join(f(elem) for elem in
self.summands.elements_topological(reverse=True))
s = s.replace('+ -', '- ')
if not s:
return '0'
return s
def _latex_(self):
r"""
A LaTeX-representation string for this asymptotic expansion.
OUTPUT:
A string.
TESTS::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: latex((5*x^2+12*x) * (x^3+O(x))) # indirect doctest
5 x^{5} + 12 x^{4} + O\!\left(x^{3}\right)
sage: latex((5*x^2-12*x) * (x^3+O(x))) # indirect doctest
5 x^{5} - 12 x^{4} + O\!\left(x^{3}\right)
"""
return self._repr_(latex=True)
def show(self):
r"""
Pretty-print this asymptotic expansion.
OUTPUT:
Nothing, the representation is printed directly on the
screen.
EXAMPLES::
sage: A.<x> = AsymptoticRing('QQ^x * x^QQ * log(x)^QQ', SR.subring(no_variables=True))
sage: (pi/2 * 5^x * x^(42/17) - sqrt(euler_gamma) * log(x)^(-7/8)).show()
<html><script type="math/tex">\newcommand{\Bold}[1]{\mathbf{#1}}\frac{1}{2} \, \pi
5^{x} x^{\frac{42}{17}} - \sqrt{\gamma_E} \log\left(x\right)^{-\frac{7}{8}}</script></html>
TESTS::
sage: A.<x> = AsymptoticRing('(e^x)^QQ * x^QQ', SR.subring(no_variables=True))
sage: (zeta(3) * (e^x)^(-1/2) * x^42).show()
<html><script type="math/tex">\newcommand{\Bold}[1]{\mathbf{#1}}\zeta(3)
\left(e^{x}\right)^{-\frac{1}{2}} x^{42}</script></html>
"""
from sage.repl.rich_output.pretty_print import pretty_print
pretty_print(self)
def _add_(self, other):
r"""
Add ``other`` to this asymptotic expansion.
INPUT:
- ``other`` -- an :class:`AsymptoticExpansion`.
OUTPUT:
The sum as an :class:`AsymptoticExpansion`.
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: expr1 = x^123; expr2 = x^321
sage: expr1._add_(expr2)
x^321 + x^123
sage: expr1 + expr2 # indirect doctest
x^321 + x^123
If an `O`-term is added to an asymptotic expansion, then
the `O`-term absorbs everything it can::
sage: x^123 + x^321 + O(x^555) # indirect doctest
O(x^555)
TESTS::
sage: x + O(x)
O(x)
sage: O(x) + x
O(x)
"""
return self.parent()(self.summands.union(other.summands),
simplify=True, convert=False)
def _sub_(self, other):
r"""
Subtract ``other`` from this asymptotic expansion.
INPUT:
- ``other`` -- an :class:`AsymptoticExpansion`.
OUTPUT:
The difference as an :class:`AsymptoticExpansion`.
.. NOTE::
Subtraction of two asymptotic expansions is implemented
by means of addition: `e_1 - e_2 = e_1 + (-1)\cdot e_2`.
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: expr1 = x^123; expr2 = x^321
sage: expr1 - expr2 # indirect doctest
-x^321 + x^123
sage: O(x) - O(x)
O(x)
"""
return self + self.parent().coefficient_ring(-1)*other
def _mul_term_(self, term):
r"""
Helper method: multiply this asymptotic expansion by the
asymptotic term ``term``.
INPUT:
- ``term`` -- an asymptotic term (see
:doc:`term_monoid`).
OUTPUT:
The product as an :class:`AsymptoticExpansion`.
TESTS::
sage: from sage.rings.asymptotic.term_monoid import OTermMonoid
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: T = OTermMonoid(R.growth_group, ZZ)
sage: expr = 10*x^2 + O(x)
sage: t = T(R.growth_group.gen())
sage: expr._mul_term_(t)
O(x^3)
"""
simplify = not term.is_exact()
return self.parent()(self.summands.mapped(lambda element: term * element),
simplify=simplify, convert=False)
def _mul_(self, other):
r"""
Multiply this asymptotic expansion by another asymptotic expansion ``other``.
INPUT:
- ``other`` -- an :class:`AsymptoticExpansion`.
OUTPUT:
The product as an :class:`AsymptoticExpansion`.
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: ex1 = 5*x^12
sage: ex2 = x^3 + O(x)
sage: ex1 * ex2 # indirect doctest
5*x^15 + O(x^13)
.. TODO::
The current implementation is the standard long
multiplication. More efficient variants like Karatsuba
multiplication, or methods that exploit the structure
of the underlying poset shall be implemented at a later
point.
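Concretely, the current implementation computes the product
summand by summand:

.. MATH::

    \mathit{self} \cdot \mathit{other}
    = \sum_{t} \mathit{self} \cdot t,

where `t` runs over the summands of ``other``.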
TESTS::
sage: R(1) * R(0)
0
sage: _.parent()
Asymptotic Ring <x^ZZ> over Integer Ring
"""
return sum(iter(self._mul_term_(term_other) for
term_other in other.summands.elements()),
self.parent().zero())
def _lmul_(self, other):
r"""
Multiply this asymptotic expansion by an element ``other`` of its
coefficient ring.
INPUT:
- ``other`` -- an element of the coefficient ring.
OUTPUT:
An :class:`AsymptoticExpansion`.
TESTS::
sage: A.<a> = AsymptoticRing(growth_group='QQ^a * a^QQ * log(a)^QQ', coefficient_ring=ZZ)
sage: 2*a # indirect doctest
2*a
"""
if other.is_zero():
return self.parent().zero()
from .term_monoid import TermMonoid
E = TermMonoid('exact', asymptotic_ring=self.parent())
e = E(self.parent().growth_group.one(), coefficient=other)
return self._mul_term_(e)
def _div_(self, other):
r"""
Divide this element by ``other``.
INPUT:
- ``other`` -- an asymptotic expansion.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: R.<x> = AsymptoticRing('x^ZZ', QQ, default_prec=5)
sage: 1/x^42
x^(-42)
sage: (1 + 4*x) / (x + 2*x^2)
2*x^(-1) - 1/2*x^(-2) + 1/4*x^(-3) - 1/8*x^(-4) + 1/16*x^(-5) + O(x^(-6))
sage: x / O(x)
Traceback (most recent call last):
...
ZeroDivisionError: Cannot invert O(x).
TESTS:
See :trac:`19521`::
sage: A.<n> = AsymptoticRing('n^ZZ', SR.subring(no_variables=True))
sage: (A.one() / 1).parent()
Asymptotic Ring <n^ZZ> over Symbolic Constants Subring
"""
return self * ~other
def __invert__(self, precision=None):
r"""
Return the multiplicative inverse of this element.
INPUT:
- ``precision`` -- the precision used for truncating the
expansion. If ``None`` (default value) is used, the
default precision of the parent is used.
OUTPUT:
An asymptotic expansion.
.. WARNING::
Due to truncation of infinite expansions, the element
returned by this method might not fulfill
``el * ~el == 1``.
.. TODO::
As soon as `L`-terms are implemented, this
implementation has to be adapted as well in order to
yield correct results.
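ALGORITHM:

Write this expansion as `m(1+x)` with `x \in o(1)` (see
:meth:`_main_term_relative_error_`). The inverse is then computed,
as a summary of the implementation below, via the truncated
geometric series

.. MATH::

    \frac{1}{m(1+x)} = m^{-1} \sum_{k\ge0} (-x)^k,

where the sum is truncated according to ``precision``.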
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ, default_prec=4)
sage: ~x
x^(-1)
sage: ~(x^42)
x^(-42)
sage: ex = ~(1 + x); ex
x^(-1) - x^(-2) + x^(-3) - x^(-4) + O(x^(-5))
sage: ex * (1+x)
1 + O(x^(-4))
sage: ~(1 + O(1/x))
1 + O(x^(-1))
TESTS::
sage: A.<a> = AsymptoticRing(growth_group='a^ZZ', coefficient_ring=ZZ)
sage: (1 / a).parent()
Asymptotic Ring <a^ZZ> over Rational Field
sage: (a / 2).parent()
Asymptotic Ring <a^ZZ> over Rational Field
::
sage: ~A(0)
Traceback (most recent call last):
...
ZeroDivisionError: Cannot invert 0 in
Asymptotic Ring <a^ZZ> over Integer Ring.
::
sage: B.<s, t> = AsymptoticRing(growth_group='s^ZZ * t^ZZ', coefficient_ring=QQ)
sage: ~(s + t)
Traceback (most recent call last):
...
ValueError: Cannot determine main term of s + t since there
are several maximal elements s, t.
"""
if not self.summands:
raise ZeroDivisionError(
'Cannot invert {} in {}.'.format(self, self.parent()))
(imax_elem, x) = self._main_term_relative_error_(return_inverse_main_term=True)
one = x.parent().one()
if x:
import itertools
result = AsymptoticExpansion._power_series_(
coefficients=itertools.repeat(one),
start=one,
ratio=-x,
ratio_start=one,
precision=precision)
else:
result = one
return result._mul_term_(imax_elem)
invert = __invert__
def truncate(self, precision=None):
r"""
Truncate this asymptotic expansion.
INPUT:
- ``precision`` -- a positive integer or ``None``. Number of
summands that are kept. If ``None`` (default value) is
given, then ``default_prec`` from the parent is used.
OUTPUT:
An asymptotic expansion.
.. NOTE::
For example, truncating an asymptotic expansion with
``precision=20`` does not yield an expansion with exactly 20
summands! Instead, it keeps the 20 summands
with the largest growth and adds appropriate
`O`-terms.
EXAMPLES::
sage: R.<x> = AsymptoticRing('x^ZZ', QQ)
sage: ex = sum(x^k for k in range(5)); ex
x^4 + x^3 + x^2 + x + 1
sage: ex.truncate(precision=2)
x^4 + x^3 + O(x^2)
sage: ex.truncate(precision=0)
O(x^4)
sage: ex.truncate()
x^4 + x^3 + x^2 + x + 1
"""
if precision is None:
precision = self.parent().default_prec
if len(self.summands) <= precision:
return self
summands = self.summands.copy()
from .term_monoid import TermMonoid
def convert_terms(element):
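# Keep the first ``precision`` summands (in decreasing growth order)
# and replace each remaining term by an O-term of its growth.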
if convert_terms.count < precision:
convert_terms.count += 1
return element
T = TermMonoid(term_monoid='O', asymptotic_ring=self.parent())
return T(element)
convert_terms.count = 0
summands.map(convert_terms, topological=True, reverse=True)
return self.parent()(summands, simplify=True, convert=False)
def exact_part(self):
r"""
Return the expansion consisting of all exact terms of this
expansion.
INPUT:
Nothing
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: R.<x> = AsymptoticRing('x^QQ * log(x)^QQ', QQ)
sage: (x^2 + O(x)).exact_part()
x^2
sage: (x + log(x)/2 + O(log(x)/x)).exact_part()
x + 1/2*log(x)
TESTS::
sage: R.<x, y> = AsymptoticRing('x^QQ * y^QQ', QQ)
sage: (x + y + O(1/(x*y))).exact_part()
x + y
sage: O(x).exact_part()
0
"""
exact_terms = self.summands.copy()
for term in self.summands.elements_topological():
if not term.is_exact():
exact_terms.remove(term.growth)
return self.parent(exact_terms)
def __pow__(self, exponent, precision=None):
r"""
Calculate the power of this asymptotic expansion to the given ``exponent``.
INPUT:
- ``exponent`` -- an element.
- ``precision`` -- the precision used for truncating the
expansion. If ``None`` (default value) is used, the
default precision of the parent is used.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: Q.<x> = AsymptoticRing(growth_group='x^QQ', coefficient_ring=QQ)
sage: x^(1/7)
x^(1/7)
sage: (x^(1/2) + O(x^0))^15
x^(15/2) + O(x^7)
::
sage: Z.<y> = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=ZZ)
sage: y^(1/7)
y^(1/7)
sage: _.parent()
Asymptotic Ring <y^QQ> over Rational Field
sage: (y^2 + O(y))^(1/2)
y + O(1)
sage: (y^2 + O(y))^(-2)
y^(-4) + O(y^(-5))
sage: (1 + 1/y + O(1/y^3))^pi
1 + pi*y^(-1) + (1/2*pi*(pi - 1))*y^(-2) + O(y^(-3))
::
sage: B.<z> = AsymptoticRing(growth_group='z^QQ * log(z)^QQ', coefficient_ring=QQ)
sage: (z^2 + O(z))^(1/2)
z + O(1)
::
sage: A.<x> = AsymptoticRing('QQ^x * x^SR * log(x)^ZZ', QQ)
sage: x * 2^x
2^x*x
sage: 5^x * 2^x
10^x
sage: 2^log(x)
x^(log(2))
sage: 2^(x + 1/x)
2^x + log(2)*2^x*x^(-1) + 1/2*log(2)^2*2^x*x^(-2) + ... + O(2^x*x^(-20))
sage: _.parent()
Asymptotic Ring <QQ^x * x^SR * log(x)^QQ> over Symbolic Ring
::
sage: C.<c> = AsymptoticRing(growth_group='QQ^c * c^QQ', coefficient_ring=QQ, default_prec=5)
sage: (3 + 1/c^2)^c
3^c + 1/3*3^c*c^(-1) + 1/18*3^c*c^(-2) - 4/81*3^c*c^(-3)
- 35/1944*3^c*c^(-4) + O(3^c*c^(-5))
sage: _.parent()
Asymptotic Ring <QQ^c * c^QQ> over Rational Field
sage: (2 + (1/3)^c)^c
2^c + 1/2*(2/3)^c*c + 1/8*(2/9)^c*c^2 - 1/8*(2/9)^c*c
+ 1/48*(2/27)^c*c^3 + O((2/27)^c*c^2)
sage: _.parent()
Asymptotic Ring <QQ^c * c^QQ> over Rational Field
TESTS:
See :trac:`19110`::
sage: O(x)^(-1)
Traceback (most recent call last):
...
ZeroDivisionError: Cannot take O(x) to exponent -1.
> *previous* ZeroDivisionError: rational division by zero
::
sage: B.<z> = AsymptoticRing(growth_group='z^QQ * log(z)^QQ', coefficient_ring=QQ, default_prec=5)
sage: z^(1+1/z)
z + log(z) + 1/2*z^(-1)*log(z)^2 + 1/6*z^(-2)*log(z)^3 +
1/24*z^(-3)*log(z)^4 + O(z^(-4)*log(z)^5)
sage: _.parent()
Asymptotic Ring <z^QQ * log(z)^QQ> over Rational Field
::
sage: B(0)^(-7)
Traceback (most recent call last):
...
ZeroDivisionError: Cannot take 0 to the negative exponent -7.
sage: B(0)^SR.var('a')
Traceback (most recent call last):
...
NotImplementedError: Taking 0 to the exponent a not implemented.
::
sage: C.<s, t> = AsymptoticRing(growth_group='s^QQ * t^QQ', coefficient_ring=QQ)
sage: (s + t)^s
Traceback (most recent call last):
...
ValueError: Cannot take s + t to the exponent s.
> *previous* ValueError: Cannot determine main term of s + t
since there are several maximal elements s, t.
Check that :trac:`19946` is fixed::
sage: A.<n> = AsymptoticRing('QQ^n * n^QQ', SR)
sage: e = 2^n; e
2^n
sage: e.parent()
Asymptotic Ring <SR^n * n^SR> over Symbolic Ring
sage: e = A(e); e
2^n
sage: e.parent()
Asymptotic Ring <QQ^n * n^QQ> over Symbolic Ring
"""
if not self.summands:
if exponent == 0:
return self.parent().one()
elif exponent > 0:
return self.parent().zero()
elif exponent < 0:
raise ZeroDivisionError('Cannot take %s to the negative exponent %s.' %
(self, exponent))
else:
raise NotImplementedError('Taking %s to the exponent %s not implemented.' %
(self, exponent))
elif exponent == 0:
return self.parent().one()
elif exponent == 1:
return self
elif len(self.summands) == 1:
element = next(self.summands.elements())
if isinstance(exponent, AsymptoticExpansion) and element.is_constant():
return exponent.rpow(base=element.coefficient, precision=precision)
try:
return self.parent()._create_element_in_extension_(
element ** exponent, element.parent())
except (ArithmeticError, TypeError, ValueError):
if not isinstance(exponent, AsymptoticExpansion):
raise
from sage.rings.integer_ring import ZZ
try:
exponent = ZZ(exponent)
except (TypeError, ValueError):
pass
else:
return super(AsymptoticExpansion, self).__pow__(exponent)
from sage.rings.rational_field import QQ
try:
exponent = QQ(exponent)
except (TypeError, ValueError):
pass
else:
return self.__pow_number__(exponent, precision=precision)
from sage.symbolic.expression import Expression
if isinstance(exponent, Expression) and exponent.is_constant():
return self.__pow_number__(exponent, precision=precision)
if isinstance(exponent, AsymptoticExpansion) and len(self.summands) != 1:
try:
return self.__pow_number__(exponent, precision=precision,
check_convergence=True)
except NoConvergenceError:
pass
try:
return (exponent * self.log(precision=precision)).exp(precision=precision)
except (TypeError, ValueError, ZeroDivisionError) as e:
from .misc import combine_exceptions
raise combine_exceptions(
ValueError('Cannot take %s to the exponent %s.' % (self, exponent)), e)
pow = __pow__
def __pow_number__(self, exponent, precision=None, check_convergence=False):
r"""
Return the power of this asymptotic expansion to some
number (``exponent``).
Let `m` be the maximal element of this asymptotic expansion
and `r` the remaining summands. This method calculates
.. MATH::
(m + r)^{\mathit{exponent}}
= m^{\mathit{exponent}} \sum_{k=0}^K
\binom{\mathit{exponent}}{k} (r/m)^k
where `K` is chosen such that adding an additional summand
does not change the result.
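For illustration, here is a minimal pure-Python sketch of this
summation scheme, using floating-point numbers instead of asymptotic
terms (the names ``m``, ``r`` and the fixed truncation bound ``K`` are
hypothetical stand-ins; the actual method operates on terms and stops
automatically once a summand no longer changes the result)::

    def pow_number_sketch(m, r, exponent, K):
        # (m + r)^exponent ~= m^exponent * sum_{k=0}^{K} binomial(exponent, k) * (r/m)^k
        x = r / m
        total = 1.0
        term = 1.0
        for k in range(1, K + 1):
            # binomial(exponent, k) * x^k via the recurrence
            # binomial(e, k) = binomial(e, k-1) * (e - k + 1) / k
            term *= (exponent - k + 1) / k * x
            total += term
        return m ** exponent * total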
INPUT:
- ``exponent`` -- a numerical value (e.g. integer, rational)
or other constant.
- ``precision`` -- a non-negative integer.
- ``check_convergence`` -- (default: ``False``) a boolean. If set,
then an additional check on the input is performed to ensure
that the calculated sum converges.
OUTPUT:
An asymptotic expansion.
.. SEEALSO::
:meth:`pow`
TESTS::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: (1 + x).__pow_number__(4)
x^4 + 4*x^3 + 6*x^2 + 4*x + 1
sage: _.parent()
Asymptotic Ring <x^ZZ> over Rational Field
sage: (x + 1).__pow_number__(1/2, precision=5)
x^(1/2) + 1/2*x^(-1/2) - 1/8*x^(-3/2) + 1/16*x^(-5/2)
- 5/128*x^(-7/2) + O(x^(-9/2))
sage: _.parent()
Asymptotic Ring <x^QQ> over Rational Field
sage: (8 + 1/x).__pow_number__(1/3, precision=5)
2 + 1/12*x^(-1) - 1/288*x^(-2) + 5/20736*x^(-3)
- 5/248832*x^(-4) + O(x^(-5))
sage: _.parent()
Asymptotic Ring <x^QQ> over Rational Field
::
sage: R(0).__pow_number__(-3/2)
Traceback (most recent call last):
...
ZeroDivisionError: Cannot take 0 to the negative exponent -3/2.
sage: R(0).__pow_number__(RIF(-1,1))
Traceback (most recent call last):
...
ValueError: Possible division by zero, since sign of
the exponent 0.? cannot be determined.
sage: R(0)^0
1
::
sage: A.<a, b> = AsymptoticRing(growth_group='a^ZZ * b^ZZ', coefficient_ring=QQ)
sage: (a + b).__pow_number__(3/2)
Traceback (most recent call last):
...
ValueError: Cannot determine main term of a + b since
there are several maximal elements a, b.
::
sage: S.<s> = AsymptoticRing(growth_group='QQ^s * s^ZZ', coefficient_ring=QQ)
sage: (2 + 2/s^2).__pow_number__(s, precision=7)
2^s + 2^s*s^(-1) + 1/2*2^s*s^(-2) - 1/3*2^s*s^(-3)
- 11/24*2^s*s^(-4) + 11/120*2^s*s^(-5)
+ 271/720*2^s*s^(-6) + O(2^s*s^(-7))
sage: _.parent()
Asymptotic Ring <QQ^s * s^QQ> over Rational Field
"""
if not self.summands:
if exponent > 0:
return self.parent().zero()
elif exponent.is_zero():
return self.parent().one()
elif exponent < 0:
raise ZeroDivisionError(
'Cannot take {} to the negative '
'exponent {}.'.format(self, exponent))
else:
raise ValueError(
'Possible division by zero, since sign of the exponent '
'{} cannot be determined.'.format(exponent))
elif len(self.summands) == 1:
element = next(self.summands.elements())
return self.parent()._create_element_in_extension_(
element**exponent, element.parent())
try:
(max_elem, x) = self._main_term_relative_error_()
except ValueError:
if check_convergence:
raise NoConvergenceError
raise
if check_convergence:
if not (x * exponent).is_little_o_of_one():
raise NoConvergenceError
pmax = self.parent()(max_elem)**exponent
import itertools
def binomials(a):
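# Yield binomial(a, 1), binomial(a, 2), ... via the recurrence
# binomial(a, k) = binomial(a, k-1) * (a - k + 1) / k; for a
# nonnegative integer exponent the generator stops once the
# coefficients vanish.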
P = a.parent()
a = a + 1
f = P(1)
for k in itertools.count(1):
k = P(k)
b = a - k
if b == 0:
return
f *= b / k
yield f
one = x.parent().one()
result = AsymptoticExpansion._power_series_(
coefficients=binomials(exponent),
start=one,
ratio=x,
ratio_start=one,
precision=precision)
return result * pmax
def sqrt(self, precision=None):
r"""
Return the square root of this asymptotic expansion.
INPUT:
- ``precision`` -- the precision used for truncating the
expansion. If ``None`` (default value) is used, the
default precision of the parent is used.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: A.<s> = AsymptoticRing(growth_group='s^QQ', coefficient_ring=QQ)
sage: s.sqrt()
s^(1/2)
sage: a = (1 + 1/s).sqrt(precision=6); a
1 + 1/2*s^(-1) - 1/8*s^(-2) + 1/16*s^(-3)
- 5/128*s^(-4) + 7/256*s^(-5) + O(s^(-6))
.. SEEALSO::
:meth:`pow`, :meth:`rpow`, :meth:`exp`.
TESTS::
sage: P.<p> = PowerSeriesRing(QQ, default_prec=6)
sage: bool(SR(a.exact_part()).subs(s=1/x) -
....: SR((1+p).sqrt().polynomial()).subs(p=x) == 0)
True
"""
from sage.rings.rational_field import QQ
return self.pow(QQ(1)/QQ(2), precision=precision)
def O(self):
r"""
Convert all terms in this asymptotic expansion to `O`-terms.
INPUT:
Nothing.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: AR.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: O(x)
O(x)
sage: type(O(x))
<class 'sage.rings.asymptotic.asymptotic_ring.AsymptoticRing_with_category.element_class'>
sage: expr = 42*x^42 + x^10 + O(x^2); expr
42*x^42 + x^10 + O(x^2)
sage: expr.O()
O(x^42)
sage: (2*x).O()
O(x)
.. SEEALSO::
:func:`sage.rings.power_series_ring.PowerSeriesRing`,
:func:`sage.rings.laurent_series_ring.LaurentSeriesRing`.
TESTS::
sage: AR(0).O()
Traceback (most recent call last):
...
NotImplementedOZero: The error term in the result is O(0)
which means 0 for sufficiently large x.
"""
if not self.summands:
from .misc import NotImplementedOZero
raise NotImplementedOZero(self.parent())
return sum(self.parent().create_summand('O', growth=element)
for element in self.summands.maximal_elements())
def log(self, base=None, precision=None):
r"""
The logarithm of this asymptotic expansion.
INPUT:
- ``base`` -- the base of the logarithm. If ``None``
(default value) is used, the natural logarithm is taken.
- ``precision`` -- the precision used for truncating the
expansion. If ``None`` (default value) is used, the
default precision of the parent is used.
OUTPUT:
An asymptotic expansion.
.. NOTE::
Computing the logarithm of an asymptotic expansion
is possible if and only if there is exactly one maximal
summand in the expansion.
ALGORITHM:
If the expansion has more than one summand,
the asymptotic expansion for `\log(1+t)` as `t` tends to `0`
is used.
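Concretely, writing this expansion as `m(1+x)` with `x \in o(1)`
(see :meth:`_main_term_relative_error_`), the series

.. MATH::

    \log\bigl(m(1+x)\bigr)
    = \log(m) + \sum_{k\ge1} \frac{(-1)^{k+1}}{k} x^k

is evaluated, truncated according to ``precision``.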
.. TODO::
As soon as `L`-terms are implemented, this
implementation has to be adapted as well in order to
yield correct results.
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ * log(x)^ZZ', coefficient_ring=QQ)
sage: log(x)
log(x)
sage: log(x^2)
2*log(x)
sage: log(x-1)
log(x) - x^(-1) - 1/2*x^(-2) - 1/3*x^(-3) - ... + O(x^(-21))
TESTS::
sage: log(R(1))
0
sage: log(R(0))
Traceback (most recent call last):
...
ArithmeticError: Cannot compute log(0) in
Asymptotic Ring <x^ZZ * log(x)^ZZ> over Rational Field.
sage: C.<s, t> = AsymptoticRing(growth_group='s^ZZ * t^ZZ', coefficient_ring=QQ)
sage: log(s + t)
Traceback (most recent call last):
...
ValueError: Cannot determine main term of s + t since
there are several maximal elements s, t.
"""
P = self.parent()
if not self.summands:
raise ArithmeticError('Cannot compute log(0) in %s.' % (self.parent(),))
elif len(self.summands) == 1:
if self.is_one():
return P.zero()
element = next(self.summands.elements())
return sum(P._create_element_in_extension_(l, element.parent())
for l in element.log_term(base=base))
(max_elem, x) = self._main_term_relative_error_()
geom = -x
from sage.rings.integer_ring import ZZ
import itertools
result = - AsymptoticExpansion._power_series_(
coefficients=iter(1 / ZZ(k)
for k in itertools.count(2)),
start=geom,
ratio=geom,
ratio_start=geom,
precision=precision)
result += x.parent()(max_elem).log()
if base:
from sage.functions.log import log
result = result / log(base)
return result
def is_exact(self):
r"""
Return whether all terms of this expansion are exact.
OUTPUT:
A boolean.
EXAMPLES::
sage: A.<x> = AsymptoticRing('x^QQ * log(x)^QQ', QQ)
sage: (x^2 + O(x)).is_exact()
False
sage: (x^2 - x).is_exact()
True
TESTS::
sage: A(0).is_exact()
True
sage: A.one().is_exact()
True
"""
return all(T.is_exact() for T in self.summands)
def is_little_o_of_one(self):
r"""
Return whether this expansion is of order `o(1)`.
INPUT:
Nothing.
OUTPUT:
A boolean.
EXAMPLES::
sage: A.<x> = AsymptoticRing('x^ZZ * log(x)^ZZ', QQ)
sage: (x^4 * log(x)^(-2) + x^(-4) * log(x)^2).is_little_o_of_one()
False
sage: (x^(-1) * log(x)^1234 + x^(-2) + O(x^(-3))).is_little_o_of_one()
True
sage: (log(x) - log(x-1)).is_little_o_of_one()
True
::
sage: A.<x, y> = AsymptoticRing('x^QQ * y^QQ * log(y)^ZZ', QQ)
sage: (x^(-1/16) * y^32 + x^32 * y^(-1/16)).is_little_o_of_one()
False
sage: (x^(-1) * y^(-3) + x^(-3) * y^(-1)).is_little_o_of_one()
True
sage: (x^(-1) * y / log(y)).is_little_o_of_one()
False
sage: (log(y-1)/log(y) - 1).is_little_o_of_one()
True
"""
return all(term.is_little_o_of_one() for term in self.summands.maximal_elements())
def rpow(self, base, precision=None):
r"""
Return the power of ``base`` to this asymptotic expansion.
INPUT:
- ``base`` -- an element or ``'e'``.
- ``precision`` -- the precision used for truncating the
expansion. If ``None`` (default value) is used, the
default precision of the parent is used.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: A.<x> = AsymptoticRing('x^ZZ', QQ)
sage: (1/x).rpow('e', precision=5)
1 + x^(-1) + 1/2*x^(-2) + 1/6*x^(-3) + 1/24*x^(-4) + O(x^(-5))
TESTS::
sage: x.rpow(SR.var('y'))
Traceback (most recent call last):
...
ArithmeticError: Cannot construct y^x in Growth Group x^ZZ
> *previous* TypeError: unsupported operand parent(s) for '*':
'Growth Group x^ZZ' and 'Growth Group SR^x'
Check that :trac:`19946` is fixed::
sage: A.<n> = AsymptoticRing('QQ^n * n^QQ', SR)
sage: n.rpow(2)
2^n
sage: _.parent()
Asymptotic Ring <QQ^n * n^SR> over Symbolic Ring
"""
if isinstance(base, AsymptoticExpansion):
return base.__pow__(self, precision=precision)
P = self.parent()
# first: remove terms from a copy of this expansion such that
# only summands in o(1) remain
expr_o = self.summands.copy()
large_terms = []
for term in self.summands.elements_topological():
if not term.is_little_o_of_one():
large_terms.append(term)
expr_o.remove(term.growth)
expr_o = P(expr_o)
# next: try to take the exponential function of the large elements
try:
large_result = P.prod(
P._create_element_in_extension_(term.rpow(base),
term.parent())
for term in large_terms)
except (TypeError, ValueError) as e:
from .misc import combine_exceptions
raise combine_exceptions(
ValueError('Cannot construct the power of %s to the '
'exponent %s in %s.' %
(base, self, self.parent())), e)
# then: expand expr_o
if not expr_o:
return large_result
if base == 'e':
geom = expr_o
else:
from sage.functions.log import log
geom = expr_o * log(base)
P = geom.parent()
from sage.rings.integer_ring import ZZ
import itertools
def inverted_factorials():
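# Yield 1/k! for k = 1, 2, ...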
f = ZZ(1)
for k in itertools.count(1):
f /= ZZ(k)
yield f
result = AsymptoticExpansion._power_series_(
coefficients=inverted_factorials(),
start=P.one(),
ratio=geom,
ratio_start=P.one(),
precision=precision)
return result * large_result
def _main_term_relative_error_(self, return_inverse_main_term=False):
r"""
Split this asymptotic expansion into `m(1+x)` with `x=o(1)`.
INPUT:
- ``return_inverse_main_term`` -- (default: ``False``) a boolean.
If set, then the pair `(m^{-1},x)` is returned instead of `(m,x)`.
OUTPUT:
A pair (``m``, ``x``) consisting of
a :mod:`term <sage.rings.asymptotic.term_monoid>` ``m`` and
an :class:`asymptotic expansion <AsymptoticExpansion>` ``x``.
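In other words, if `m` denotes the unique maximal summand of this
expansion `e`, then `x = m^{-1} e - 1`; this is exactly the quotient
computed in the code below.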
EXAMPLES::
sage: R.<n> = AsymptoticRing('n^ZZ', QQ)
sage: ex = 2*n^2 + n + O(1/n)
sage: (m, x) = ex._main_term_relative_error_()
sage: m
2*n^2
sage: x
1/2*n^(-1) + O(n^(-3))
sage: ex = 2*n^2 + n
sage: (m, x) = ex._main_term_relative_error_()
sage: m
2*n^2
sage: x
1/2*n^(-1)
sage: ex._main_term_relative_error_(return_inverse_main_term=True)
(1/2*n^(-2), 1/2*n^(-1))
sage: R(0)._main_term_relative_error_()
Traceback (most recent call last):
...
ArithmeticError: Cannot determine main term of 0.
TESTS::
sage: R.<m, n> = AsymptoticRing('n^ZZ*m^ZZ', QQ)
sage: (m + n)._main_term_relative_error_()
Traceback (most recent call last):
...
ValueError: Cannot determine main term of m + n since
there are several maximal elements m, n.
"""
if not self.summands:
raise ArithmeticError("Cannot determine main term of 0.")
max_elem = tuple(self.summands.maximal_elements())
if len(max_elem) != 1:
raise ValueError('Cannot determine main term of {} since there '
'are several maximal elements {}.'.format(
self, ', '.join(str(e) for e in
sorted(max_elem, key=str))))
max_elem = max_elem[0]
imax_elem = ~max_elem
if imax_elem.parent() is max_elem.parent():
new_self = self
else:
new_self = self.parent()._create_element_in_extension_(
imax_elem, max_elem.parent()).parent()(self)
one = new_self.parent().one()
x = - one + new_self._mul_term_(imax_elem)
if return_inverse_main_term:
return (imax_elem, x)
else:
return (max_elem, x)
@staticmethod
def _power_series_(coefficients, start, ratio, ratio_start, precision):
r"""
Return a truncated Taylor series.
Let `c_k` be determined by the ``coefficients`` and set
.. MATH::
s_k = c_k \cdot \mathit{ratio\_start} \cdot \mathit{ratio}^k.
The result is
.. MATH::
\mathit{start} + \sum_{k=1}^K s_k
where `K` is chosen such that adding `s_{K+1}` does not change
the result.
INPUT:
- ``coefficients`` -- an iterator.
- ``start`` -- an asymptotic expansion.
- ``ratio`` -- an asymptotic expansion.
- ``ratio_start`` -- an asymptotic expansion.
- ``precision`` -- a non-negative integer. All intermediate
results are truncated to this precision.
OUTPUT:
An asymptotic expansion.
TESTS::
sage: from sage.rings.asymptotic.asymptotic_ring import AsymptoticExpansion
sage: from itertools import count
sage: A.<g> = AsymptoticRing('g^ZZ', QQ)
sage: AsymptoticExpansion._power_series_(
....: coefficients=iter(ZZ(k) for k in count(1)),
....: start=A(42),
....: ratio=1/g,
....: ratio_start=A(5),
....: precision=4)
42 + 5*g^(-1) + 10*g^(-2) + 15*g^(-3) + O(g^(-4))
sage: AsymptoticExpansion._power_series_(
....: coefficients=iter(ZZ(k) for k in count(1)),
....: start=A(42),
....: ratio=1/g+O(1/g^2),
....: ratio_start=A(5),
....: precision=4)
42 + 5*g^(-1) + O(g^(-2))
sage: AsymptoticExpansion._power_series_(
....: coefficients=iter(ZZ(k) for k in count(1)),
....: start=A(42),
....: ratio=1/g+O(1/g^2),
....: ratio_start=A(5),
....: precision=1000000)
42 + 5*g^(-1) + O(g^(-2))
"""
result = start
g = ratio_start
for c in coefficients:
g *= ratio
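# now g == ratio_start * ratio^k, so c*g is the summand s_k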
new_result = (result + c*g).truncate(precision=precision)
if new_result.has_same_summands(result):
break
result = new_result
return result
def exp(self, precision=None):
r"""
Return the exponential of (i.e., the power of `e` to) this asymptotic expansion.
INPUT:
- ``precision`` -- the precision used for truncating the
expansion. If ``None`` (default value) is used, the
default precision of the parent is used.
OUTPUT:
An asymptotic expansion.
.. NOTE::
The exponential function of this expansion can only be
computed exactly if the respective growth element can be
constructed in the underlying growth group.
ALGORITHM:
If the corresponding growth can be constructed, return
the exact exponential function. Otherwise, if this term
is `o(1)`, try to expand the series and truncate
according to the given precision.
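Concretely, for an expansion `t \in o(1)` the series

.. MATH::

    e^t = \sum_{k\ge0} \frac{t^k}{k!}

is evaluated (via :meth:`rpow` with base `e`) and truncated
according to ``precision``.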
.. TODO::
As soon as `L`-terms are implemented, this
implementation has to be adapted as well in order to
yield correct results.
EXAMPLES::
sage: A.<x> = AsymptoticRing('(e^x)^ZZ * x^ZZ * log(x)^ZZ', SR)
sage: exp(x)
e^x
sage: exp(2*x)
(e^x)^2
sage: exp(x + log(x))
e^x*x
::
sage: (x^(-1)).exp(precision=7)
1 + x^(-1) + 1/2*x^(-2) + 1/6*x^(-3) + ... + O(x^(-7))
TESTS::
sage: A.<x> = AsymptoticRing('(e^x)^ZZ * x^QQ * log(x)^QQ', SR)
sage: exp(log(x))
x
sage: log(exp(x))
x
::
sage: exp(x+1)
e*e^x
See :trac:`19521`::
sage: A.<n> = AsymptoticRing('n^ZZ', SR.subring(no_variables=True))
sage: exp(O(n^(-3))).parent()
Asymptotic Ring <n^ZZ> over Symbolic Constants Subring
"""
return self.rpow('e', precision=precision)
def substitute(self, rules=None, domain=None, **kwds):
r"""
Substitute the given ``rules`` in this asymptotic expansion.
INPUT:
- ``rules`` -- a dictionary.
- ``kwds`` -- keyword arguments will be added to the
substitution ``rules``.
- ``domain`` -- (default: ``None``) a parent. The neutral
elements `0` and `1` (rules for the keys ``'_zero_'`` and
``'_one_'``, see the note box below) are taken from this
domain. If ``None``, then this is determined automatically.
OUTPUT:
An object.
.. NOTE::
The neutral element of the asymptotic ring is replaced by
the value associated to the key ``'_zero_'``; the neutral element of
the growth group is replaced by the value associated to the key
``'_one_'``.
EXAMPLES::
sage: A.<x> = AsymptoticRing(growth_group='(e^x)^QQ * x^ZZ * log(x)^ZZ', coefficient_ring=QQ, default_prec=5)
::
sage: (e^x * x^2 + log(x)).subs(x=SR('s'))
s^2*e^s + log(s)
sage: _.parent()
Symbolic Ring
::
sage: (x^3 + x + log(x)).subs(x=x+5).truncate(5)
x^3 + 15*x^2 + 76*x + log(x) + 130 + O(x^(-1))
sage: _.parent()
Asymptotic Ring <(e^x)^QQ * x^ZZ * log(x)^ZZ> over Rational Field
::
sage: (e^x * x^2 + log(x)).subs(x=2*x)
4*(e^x)^2*x^2 + log(x) + log(2)
sage: _.parent()
Asymptotic Ring <(e^x)^QQ * x^QQ * log(x)^QQ> over Symbolic Ring
::
sage: (x^2 + log(x)).subs(x=4*x+2).truncate(5)
16*x^2 + 16*x + log(x) + log(4) + 4 + 1/2*x^(-1) + O(x^(-2))
sage: _.parent()
Asymptotic Ring <(e^x)^QQ * x^ZZ * log(x)^ZZ> over Symbolic Ring
::
sage: (e^x * x^2 + log(x)).subs(x=RIF(pi))
229.534211738584?
sage: _.parent()
Real Interval Field with 53 bits of precision
.. SEEALSO::
:meth:`sage.symbolic.expression.Expression.subs`
TESTS::
sage: x.subs({'y': -1})
Traceback (most recent call last):
...
ValueError: Cannot substitute y in x since it is not a generator of
Asymptotic Ring <(e^x)^QQ * x^ZZ * log(x)^ZZ> over Rational Field.
sage: B.<u, v, w> = AsymptoticRing(growth_group='u^QQ * v^QQ * w^QQ', coefficient_ring=QQ)
sage: (1/u).subs({'u': 0})
Traceback (most recent call last):
...
TypeError: Cannot apply the substitution rules {u: 0} on u^(-1) in
Asymptotic Ring <u^QQ * v^QQ * w^QQ> over Rational Field.
> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Asymptotic Ring <u^QQ * v^QQ * w^QQ> over Rational Field.
>> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Exact Term Monoid u^QQ * v^QQ * w^QQ with coefficients in Rational Field.
>...> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Growth Group u^QQ * v^QQ * w^QQ.
>...> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Growth Group u^QQ.
>...> *previous* ZeroDivisionError: rational division by zero
sage: (1/u).subs({'u': 0, 'v': SR.var('v')})
Traceback (most recent call last):
...
TypeError: Cannot apply the substitution rules {u: 0, v: v} on u^(-1) in
Asymptotic Ring <u^QQ * v^QQ * w^QQ> over Rational Field.
> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Asymptotic Ring <u^QQ * v^QQ * w^QQ> over Rational Field.
>> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Exact Term Monoid u^QQ * v^QQ * w^QQ with coefficients in Rational Field.
>...> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Growth Group u^QQ * v^QQ * w^QQ.
>...> *previous* ZeroDivisionError: Cannot substitute in u^(-1) in
Growth Group u^QQ.
>...> *previous* ZeroDivisionError: rational division by zero
::
sage: u.subs({u: 0, 'v': SR.var('v')})
0
sage: v.subs({u: 0, 'v': SR.var('v')})
v
sage: _.parent()
Symbolic Ring
::
sage: u.subs({SR.var('u'): -1})
Traceback (most recent call last):
...
TypeError: Cannot substitute u in u since it is neither an
asymptotic expansion nor a string
(but a <type 'sage.symbolic.expression.Expression'>).
::
sage: u.subs({u: 1, 'u': 1})
1
sage: u.subs({u: 1}, u=1)
1
sage: u.subs({u: 1, 'u': 2})
Traceback (most recent call last):
...
ValueError: Cannot substitute in u: duplicate key u.
sage: u.subs({u: 1}, u=3)
Traceback (most recent call last):
...
ValueError: Cannot substitute in u: duplicate key u.
"""
# check if nothing to do
if not rules and not kwds:
return self
# init and process keyword arguments
gens = self.parent().gens()
locals = kwds or dict()
# update with rules
if isinstance(rules, dict):
for k, v in rules.iteritems():
if not isinstance(k, str) and k not in gens:
raise TypeError('Cannot substitute %s in %s '
'since it is neither an '
'asymptotic expansion '
'nor a string (but a %s).' %
(k, self, type(k)))
k = str(k)
if k in locals and locals[k] != v:
raise ValueError('Cannot substitute in %s: '
'duplicate key %s.' % (self, k))
locals[k] = v
elif rules is not None:
raise TypeError('Substitution rules %s have to be a dictionary.' %
(rules,))
# fill up missing rules
for g in gens:
locals.setdefault(str(g), g)
# check if all keys are generators
gens_str = tuple(str(g) for g in gens)
for k in locals:
if str(k) not in gens_str:
raise ValueError('Cannot substitute %s in %s '
'since it is not a generator of %s.' %
(k, self, self.parent()))
# determine 0 and 1
if domain is None and \
('_zero_' not in locals or '_one_' not in locals):
P = self.parent()
for g in gens:
G = locals[str(g)].parent()
if G is not P:
domain = G
break
else:
domain = P
locals.setdefault('_zero_', domain.zero())
locals.setdefault('_one_', domain.one())
# do the actual substitution
try:
return self._substitute_(locals)
except (ArithmeticError, TypeError, ValueError) as e:
from .misc import combine_exceptions
rules = '{' + ', '.join(
'%s: %s' % (k, v)
for k, v in sorted(locals.iteritems(),
key=lambda k: str(k[0]))
if not k.startswith('_') and
not any(k == str(g) and v is g for g in gens)) + '}'
raise combine_exceptions(
TypeError('Cannot apply the substitution rules %s on %s '
'in %s.' % (rules, self, self.parent())), e)
subs = substitute
def _substitute_(self, rules):
r"""
Substitute the given ``rules`` in this asymptotic expansion.
INPUT:
- ``rules`` -- a dictionary.
The neutral element of the asymptotic ring is replaced by the value
associated to the key ``'_zero_'``.
OUTPUT:
An object.
TESTS::
sage: A.<z> = AsymptoticRing(growth_group='z^QQ', coefficient_ring=QQ)
sage: z._substitute_({'z': SR.var('a')})
a
sage: _.parent()
Symbolic Ring
sage: A(0)._substitute_({'_zero_': 'zero'})
'zero'
sage: (1/z)._substitute_({'z': 4})
1/4
sage: _.parent()
Rational Field
sage: (1/z)._substitute_({'z': 0})
Traceback (most recent call last):
...
ZeroDivisionError: Cannot substitute in z^(-1) in
Asymptotic Ring <z^QQ> over Rational Field.
> *previous* ZeroDivisionError: Cannot substitute in z^(-1) in
Exact Term Monoid z^QQ with coefficients in Rational Field.
>> *previous* ZeroDivisionError: Cannot substitute in z^(-1) in
Growth Group z^QQ.
>...> *previous* ZeroDivisionError: rational division by zero
"""
if not self.summands:
return rules['_zero_']
from sage.symbolic.operators import add_vararg
try:
return add_vararg(
*tuple(s._substitute_(rules)
for s in self.summands.elements_topological()))
except (ArithmeticError, TypeError, ValueError) as e:
from .misc import substitute_raise_exception
substitute_raise_exception(self, e)
def compare_with_values(self, variable, function, values,
rescaled=True, ring=RIF):
"""
Compute the (rescaled) difference between this asymptotic
expansion and the given values.
INPUT:
- ``variable`` -- an asymptotic expansion or a string.
- ``function`` -- a callable or symbolic expression giving the
comparison values.
- ``values`` -- a list or iterable of values where the comparison
shall be carried out.
- ``rescaled`` -- (default: ``True``) determines whether
the difference is divided by the error term of the asymptotic
expansion.
- ``ring`` -- (default: ``RIF``) the parent into which the
difference is converted.
OUTPUT:
A list of pairs containing comparison points and (rescaled)
difference values.
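More precisely, if `m` denotes the exact part of this expansion,
`g` the growth of its unique `O`-term and `f` the given ``function``,
then the pair

.. MATH::

    \Bigl(k, \frac{m(k) - f(k)}{g(k)}\Bigr)

is computed for each `k` in ``values`` (without the division by
`g(k)` if ``rescaled`` is ``False``).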
EXAMPLES::
sage: A.<n> = AsymptoticRing('QQ^n * n^ZZ', SR)
sage: catalan = binomial(2*x, x)/(x+1)
sage: expansion = 4^n*(1/sqrt(pi)*n^(-3/2)
....: - 9/8/sqrt(pi)*n^(-5/2)
....: + 145/128/sqrt(pi)*n^(-7/2) + O(n^(-9/2)))
sage: expansion.compare_with_values(n, catalan, srange(5, 10))
[(5, 0.5303924444775?),
(6, 0.5455279498787?),
(7, 0.556880411050?),
(8, 0.565710587724?),
(9, 0.572775029098?)]
sage: expansion.compare_with_values(n, catalan, [5, 10, 20], rescaled=False)
[(5, 0.3886263699387?), (10, 19.1842458318?), (20, 931314.63637?)]
sage: expansion.compare_with_values(n, catalan, [5, 10, 20], rescaled=False, ring=SR)
[(5, 168/5*sqrt(5)/sqrt(pi) - 42),
(10, 1178112/125*sqrt(10)/sqrt(pi) - 16796),
(20, 650486218752/125*sqrt(5)/sqrt(pi) - 6564120420)]
Instead of a symbolic expression, a callable function can
be specified as well::
sage: A.<n> = AsymptoticRing('n^ZZ * log(n)^ZZ', SR)
sage: def H(n):
....: return sum(1/k for k in srange(1, n+1))
sage: H_expansion = (log(n) + euler_gamma + 1/(2*n)
....: - 1/(12*n^2) + O(n^-4))
sage: H_expansion.compare_with_values(n, H, srange(25, 30)) # rel tol 1e-6
[(25, -0.008326995?),
(26, -0.008327472?),
(27, -0.008327898?),
(28, -0.00832828?),
(29, -0.00832862?)]
.. SEEALSO::
:meth:`plot_comparison`
TESTS::
sage: A.<x, y> = AsymptoticRing('x^ZZ*y^ZZ', QQ)
sage: expansion = x^2 + O(x) + O(y)
sage: expansion.compare_with_values(y, lambda z: z^2, srange(20, 30))
Traceback (most recent call last):
....
NotImplementedError: exactly one error term required
sage: expansion = x^2
sage: expansion.compare_with_values(y, lambda z: z^2, srange(20, 30))
Traceback (most recent call last):
....
NotImplementedError: exactly one error term required
sage: expansion = x^2 + O(x)
sage: expansion.compare_with_values(y, lambda z: z^2, srange(20, 30))
Traceback (most recent call last):
....
NameError: name 'x' is not defined
sage: expansion.compare_with_values(x, lambda z: z^2, srange(20, 30))
[(20, 0), (21, 0), ..., (29, 0)]
sage: expansion.compare_with_values(x, SR('x*y'), srange(20, 30))
Traceback (most recent call last):
....
NotImplementedError: expression x*y has more than one variable
"""
from .term_monoid import OTerm
from sage.rings.integer_ring import ZZ
main = self.exact_part()
error = self - main
error_terms = list(error.summands)
if len(error_terms) != 1:
raise NotImplementedError("exactly one error term required")
if not isinstance(error_terms[0], OTerm):
raise NotImplementedError("{} is not an O term".format(error))
error_growth = error_terms[0].growth
if hasattr(function, 'variables'):
expr = function
vars = expr.variables()
if len(vars) > 1:
raise NotImplementedError("expression {} has more than one "
"variable".format(expr))
elif len(vars) == 1:
v = vars[0]
def function(arg):
return expr.subs({v: arg})
else:
def function(arg):
return expr
if rescaled:
points = list(
(k, ring((main.subs({variable: k}) - function(k)) /
error_growth._substitute_({str(variable): k,
'_one_': ZZ(1)})))
for k in values)
else:
points = list(
(k, ring(main.subs({variable: k}) - function(k)))
for k in values)
return points
def plot_comparison(self, variable, function, values, rescaled=True,
ring=RIF, relative_tolerance=0.025, **kwargs):
r"""
Plot the (rescaled) difference between this asymptotic
expansion and the given values.
INPUT:
- ``variable`` -- an asymptotic expansion or a string.
- ``function`` -- a callable or symbolic expression giving the
comparison values.
- ``values`` -- a list or iterable of values where the comparison
shall be carried out.
- ``rescaled`` -- (default: ``True``) determines whether
the difference is divided by the error term of the asymptotic
expansion.
- ``ring`` -- (default: ``RIF``) the parent into which the
difference is converted.
- ``relative_tolerance`` -- (default: ``0.025``). Raise an error
when the relative error exceeds this tolerance.
Other keyword arguments are passed to :func:`list_plot`.
OUTPUT:
A graphics object.
.. NOTE::
If rescaled (i.e. divided by the error term), the output
should be bounded.
This method is mainly meant to provide an easily usable
plausibility check for asymptotic expansions created in
some way.
EXAMPLES:
We want to check the quality of the asymptotic expansion of
the harmonic numbers::
sage: A.<n> = AsymptoticRing('n^ZZ * log(n)^ZZ', SR)
sage: def H(n):
....: return sum(1/k for k in srange(1, n+1))
sage: H_expansion = (log(n) + euler_gamma + 1/(2*n)
....: - 1/(12*n^2) + O(n^-4))
sage: H_expansion.plot_comparison(n, H, srange(1, 30))
Graphics object consisting of 1 graphics primitive
Alternatively, the unscaled (absolute) difference can be
plotted as well::
sage: H_expansion.plot_comparison(n, H, srange(1, 30),
....: rescaled=False)
Graphics object consisting of 1 graphics primitive
Additional keywords are passed to :func:`list_plot`::
sage: H_expansion.plot_comparison(n, H, srange(1, 30),
....: plotjoined=True, marker='o',
....: color='green')
Graphics object consisting of 1 graphics primitive
.. SEEALSO::
:meth:`compare_with_values`
TESTS::
sage: H_expansion.plot_comparison(n, H, [600])
Traceback (most recent call last):
...
ValueError: Numerical noise is too high, the comparison is inaccurate
sage: H_expansion.plot_comparison(n, H, [600], relative_tolerance=2)
Graphics object consisting of 1 graphics primitive
"""
from sage.plot.plot import list_plot
points = self.compare_with_values(variable, function,
values, rescaled=rescaled, ring=ring)
from sage.rings.real_mpfi import RealIntervalField_class
if isinstance(ring, RealIntervalField_class):
if not all(p[1].relative_diameter() <= relative_tolerance for p in points):
raise ValueError('Numerical noise is too high, the '
'comparison is inaccurate')
# RIFs cannot be plotted, they need to be converted to RR
# (see #15011).
points = [(p[0], p[1].center()) for p in points]
return list_plot(points, **kwargs)
def symbolic_expression(self, R=None):
r"""
Return this asymptotic expansion as a symbolic expression.
INPUT:
- ``R`` -- (a subring of) the symbolic ring or ``None``.
The output will be an element of ``R``. If ``None``,
then the symbolic ring is used.
OUTPUT:
A symbolic expression.
EXAMPLES::
sage: A.<x, y, z> = AsymptoticRing(growth_group='x^ZZ * y^QQ * log(y)^QQ * QQ^z * z^QQ', coefficient_ring=QQ)
sage: SR(A.an_element()) # indirect doctest
1/8*(1/8)^z*x^3*y^(3/2)*z^(3/2)*log(y)^(3/2) +
Order((1/2)^z*x*sqrt(y)*sqrt(z)*sqrt(log(y)))
TESTS::
sage: a = A.an_element(); a
1/8*x^3*y^(3/2)*log(y)^(3/2)*(1/8)^z*z^(3/2) +
O(x*y^(1/2)*log(y)^(1/2)*(1/2)^z*z^(1/2))
sage: a.symbolic_expression()
1/8*(1/8)^z*x^3*y^(3/2)*z^(3/2)*log(y)^(3/2) +
Order((1/2)^z*x*sqrt(y)*sqrt(z)*sqrt(log(y)))
sage: _.parent()
Symbolic Ring
::
sage: from sage.symbolic.ring import SymbolicRing
sage: class MySymbolicRing(SymbolicRing):
....: pass
sage: mySR = MySymbolicRing()
sage: a.symbolic_expression(mySR).parent() is mySR
True
"""
if R is None:
from sage.symbolic.ring import SR
R = SR
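# Substitute each generator of the parent by the like-named symbolic
# variable of R; the substitution maps the whole expansion into R.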
return self.substitute(dict((g, R(R.var(str(g))))
for g in self.parent().gens()),
domain=R)
_symbolic_ = symbolic_expression # will be used by SR._element_constructor_
def map_coefficients(self, f, new_coefficient_ring=None):
r"""
Return the asymptotic expansion obtained by applying ``f`` to
each coefficient of this asymptotic expansion.
INPUT:
- ``f`` -- a callable. A coefficient `c` will be mapped to `f(c)`.
- ``new_coefficient_ring`` -- (default: ``None``) a ring.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: A.<n> = AsymptoticRing(growth_group='n^ZZ', coefficient_ring=ZZ)
sage: a = n^4 + 2*n^3 + 3*n^2 + O(n)
sage: a.map_coefficients(lambda c: c+1)
2*n^4 + 3*n^3 + 4*n^2 + O(n)
sage: a.map_coefficients(lambda c: c-2)
-n^4 + n^2 + O(n)
TESTS::
sage: a.map_coefficients(lambda c: 1/c, new_coefficient_ring=QQ)
n^4 + 1/2*n^3 + 1/3*n^2 + O(n)
sage: _.parent()
Asymptotic Ring <n^ZZ> over Rational Field
sage: a.map_coefficients(lambda c: 1/c)
Traceback (most recent call last):
...
ValueError: ... is not a coefficient in
Exact Term Monoid n^ZZ with coefficients in Integer Ring.
"""
def mapping(term):
T = term.parent().change_parameter(
coefficient_ring=new_coefficient_ring)
if hasattr(term, 'coefficient'):
c = f(term.coefficient)
if c.is_zero():
return None
return T(term.growth, c)
else:
return T(term.growth)
P = self.parent().change_parameter(coefficient_ring=new_coefficient_ring)
S = self.summands.copy()
S.map(mapping)
return P(S, simplify=False, convert=False)
def factorial(self):
r"""
Return the factorial of this asymptotic expansion.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: A.<n> = AsymptoticRing(growth_group='n^ZZ * log(n)^ZZ', coefficient_ring=ZZ, default_prec=5)
sage: n.factorial()
sqrt(2)*sqrt(pi)*e^(n*log(n))*(e^n)^(-1)*n^(1/2)
+ 1/12*sqrt(2)*sqrt(pi)*e^(n*log(n))*(e^n)^(-1)*n^(-1/2)
+ 1/288*sqrt(2)*sqrt(pi)*e^(n*log(n))*(e^n)^(-1)*n^(-3/2)
+ O(e^(n*log(n))*(e^n)^(-1)*n^(-5/2))
sage: _.parent()
Asymptotic Ring <(e^(n*log(n)))^(Symbolic Constants Subring) *
(e^n)^(Symbolic Constants Subring) *
n^(Symbolic Constants Subring) *
log(n)^(Symbolic Constants Subring)>
over Symbolic Constants Subring
This can be used to compute the asymptotics of the
:wikipedia:`Catalan numbers <Catalan_number>`
`\frac{1}{n+1}\binom{2n}{n}`::
sage: (2*n).factorial() / n.factorial()^2 / (n+1) # long time
1/sqrt(pi)*(e^n)^(2*log(2))*n^(-3/2)
- 9/8/sqrt(pi)*(e^n)^(2*log(2))*n^(-5/2)
+ 145/128/sqrt(pi)*(e^n)^(2*log(2))*n^(-7/2)
+ O((e^n)^(2*log(2))*n^(-9/2))
Note that this method substitutes the asymptotic expansion into
Stirling's formula. This substitution has to be possible which is
not always guaranteed::
sage: S.<s> = AsymptoticRing(growth_group='s^QQ * log(s)^QQ', coefficient_ring=QQ, default_prec=4)
sage: log(s).factorial()
Traceback (most recent call last):
...
TypeError: Cannot apply the substitution rules {s: log(s)} on
sqrt(2)*sqrt(pi)*e^(s*log(s))*(e^s)^(-1)*s^(1/2)
+ O(e^(s*log(s))*(e^s)^(-1)*s^(-1/2)) in
Asymptotic Ring <(e^(s*log(s)))^QQ * (e^s)^QQ * s^QQ * log(s)^QQ>
over Symbolic Constants Subring.
...
.. SEEALSO::
:meth:`~sage.rings.asymptotic.asymptotic_expansion_generators.AsymptoticExpansionGenerators.Stirling`
TESTS::
sage: A.<m> = AsymptoticRing(growth_group='m^ZZ * log(m)^ZZ', coefficient_ring=QQ, default_prec=5)
sage: m.factorial()
sqrt(2)*sqrt(pi)*e^(m*log(m))*(e^m)^(-1)*m^(1/2)
+ 1/12*sqrt(2)*sqrt(pi)*e^(m*log(m))*(e^m)^(-1)*m^(-1/2)
+ 1/288*sqrt(2)*sqrt(pi)*e^(m*log(m))*(e^m)^(-1)*m^(-3/2)
+ O(e^(m*log(m))*(e^m)^(-1)*m^(-5/2))
::
sage: A(1/2).factorial()
1/2*sqrt(pi)
sage: _.parent()
Asymptotic Ring <m^ZZ * log(m)^ZZ> over Symbolic Ring
::
sage: B.<a, b> = AsymptoticRing('a^ZZ * b^ZZ', QQ, default_prec=3)
sage: b.factorial()
O(e^(b*log(b))*(e^b)^(-1)*b^(1/2))
sage: (a*b).factorial()
Traceback (most recent call last):
...
ValueError: Cannot build the factorial of a*b
since it is not univariate.
"""
vars = self.variable_names()
if len(vars) == 0:
if self.is_zero():
return self.parent().one()
assert len(self.summands) == 1
element = next(self.summands.elements())
return self.parent()._create_element_in_extension_(
element._factorial_(), element.parent())
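# Univariate case: substitute this expansion into Stirling's expansion
# of the factorial, after coercing both into a common parent.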
if len(vars) == 1:
from .asymptotic_expansion_generators import \
asymptotic_expansions
var = vars[0]
S = asymptotic_expansions.Stirling(
var, precision=self.parent().default_prec)
from sage.structure.element import get_coercion_model
cm = get_coercion_model()
P = cm.common_parent(self, S)
return S.subs({var: P.coerce(self)})
else:
raise ValueError(
'Cannot build the factorial of {} since it is not '
'univariate.'.format(self))
def variable_names(self):
r"""
Return the names of the variables of this asymptotic expansion.
OUTPUT:
A tuple of strings.
EXAMPLES::
sage: A.<m, n> = AsymptoticRing('QQ^m * m^QQ * n^ZZ * log(n)^ZZ', QQ)
sage: (4*2^m*m^4*log(n)).variable_names()
('m', 'n')
sage: (4*2^m*m^4).variable_names()
('m',)
sage: (4*log(n)).variable_names()
('n',)
sage: (4*m^3).variable_names()
('m',)
sage: (4*m^0).variable_names()
()
sage: (4*2^m*m^4 + log(n)).variable_names()
('m', 'n')
sage: (2^m + m^4 + log(n)).variable_names()
('m', 'n')
sage: (2^m + m^4).variable_names()
('m',)
"""
vars = sorted(sum(iter(s.variable_names()
for s in self.summands),
tuple()))
from itertools import groupby
return tuple(v for v, _ in groupby(vars))
def _singularity_analysis_(self, var, zeta, precision=None):
r"""
Return the asymptotic growth of the coefficients of some
generating function having this singular expansion around `\zeta`.
INPUT:
- ``var`` -- a string, the variable for the growth of the coefficients,
or the generator of an asymptotic ring.
- ``zeta`` -- location of the singularity
- ``precision`` -- (default: ``None``) an integer. If ``None``, then
the default precision of the parent of this expansion is used.
OUTPUT:
An asymptotic expansion in ``var``.
EXAMPLES::
sage: C.<T> = AsymptoticRing('T^QQ', QQ)
sage: ex = 2 - 2*T^(-1/2) + 2*T^(-1) - 2*T^(-3/2) + O(T^(-2))
sage: ex._singularity_analysis_('n', 1/4, precision=2)
1/sqrt(pi)*4^n*n^(-3/2) - 9/8/sqrt(pi)*4^n*n^(-5/2) + O(4^n*n^(-3))
The parameter ``var`` can also be the generator of an asymptotic
ring::
sage: A.<n> = AsymptoticRing('n^QQ', QQ)
sage: ex._singularity_analysis_(n, 1/4, precision=2)
1/sqrt(pi)*4^n*n^(-3/2) - 9/8/sqrt(pi)*4^n*n^(-5/2) + O(4^n*n^(-3))
If the parameter ``precision`` is omitted, the default precision
of the parent of this expansion is used. ::
sage: C.<T> = AsymptoticRing('T^QQ', QQ, default_prec=1)
sage: ex = 2 - 2*T^(-1/2) + 2*T^(-1) - 2*T^(-3/2) + O(T^(-2))
sage: ex._singularity_analysis_('n', 1/4)
1/sqrt(pi)*4^n*n^(-3/2) + O(4^n*n^(-5/2))
.. SEEALSO::
:meth:`AsymptoticRing.coefficients_of_generating_function`
.. WARNING::
Once singular expansions around points other than infinity
are implemented (:trac:`20050`), this method will be
renamed to ``singularity_analysis``, the parameter
``zeta`` will be dropped (as it will be part of the
singular expansion) and expansions around infinity will no
longer be accepted.
"""
from .misc import NotImplementedOZero
OZeroEncountered = False
if precision is None:
precision = self.parent().default_prec
result = 0
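# Sum the contributions of all summands; if some summand only yields
# O(0) while the rest is exact, the result would claim too much
# precision, so an error is raised below.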
for s in self.summands:
try:
contribution = s._singularity_analysis_(
var=var, zeta=zeta,
precision=precision)
except NotImplementedOZero:
OZeroEncountered = True
else:
result += contribution
if OZeroEncountered and result.is_exact():
raise NotImplementedOZero(self)
return result
class AsymptoticRing(Algebra, UniqueRepresentation):
r"""
A ring consisting of :class:`asymptotic expansions <AsymptoticExpansion>`.
INPUT:
- ``growth_group`` -- either a partially ordered group (see
:doc:`growth_group`) or a string
describing such a growth group (see
:class:`~sage.rings.asymptotic.growth_group.GrowthGroupFactory`).
- ``coefficient_ring`` -- the ring which contains the
coefficients of the expansions.
- ``default_prec`` -- a positive integer. This is the number of
summands that are kept before truncating an infinite series.
- ``category`` -- the category of the parent can be specified
in order to broaden the base structure. It has to be a
subcategory of ``Category of rings``. This is also the default
category if ``None`` is specified.
EXAMPLES:
We begin with the construction of an asymptotic ring in various
ways. First, we simply pass a string specifying the underlying
growth group::
sage: R1_x.<x> = AsymptoticRing(growth_group='x^QQ', coefficient_ring=QQ); R1_x
Asymptotic Ring <x^QQ> over Rational Field
sage: x
x
This is equivalent to the following code, which explicitly
specifies the underlying growth group::
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: G_QQ = GrowthGroup('x^QQ')
sage: R2_x.<x> = AsymptoticRing(growth_group=G_QQ, coefficient_ring=QQ); R2_x
Asymptotic Ring <x^QQ> over Rational Field
Of course, the coefficient ring of the asymptotic ring and the
base ring of the underlying growth group do not need to
coincide::
sage: R_ZZ_x.<x> = AsymptoticRing(growth_group='x^QQ', coefficient_ring=ZZ); R_ZZ_x
Asymptotic Ring <x^QQ> over Integer Ring
Note, we can also create and use logarithmic growth groups::
sage: R_log = AsymptoticRing(growth_group='log(x)^ZZ', coefficient_ring=QQ); R_log
Asymptotic Ring <log(x)^ZZ> over Rational Field
Other growth groups are available. See :doc:`asymptotic_ring` for
more examples.
Below are some technical details.
According to the conventions for parents, uniqueness is ensured::
sage: R1_x is R2_x
True
Furthermore, the coercion framework is also involved. Coercion
between two asymptotic rings is possible (given that the
underlying growth groups and coefficient rings are chosen
appropriately)::
sage: R1_x.has_coerce_map_from(R_ZZ_x)
True
Additionally, for the sake of convenience, the coefficient ring
also coerces into the asymptotic ring (representing constant
quantities)::
sage: R1_x.has_coerce_map_from(QQ)
True
TESTS::
sage: from sage.rings.asymptotic.asymptotic_ring import AsymptoticRing as AR_class
sage: class AR(AR_class):
....: class Element(AR_class.Element):
....: __eq__ = AR_class.Element.has_same_summands
sage: A = AR(growth_group='z^QQ', coefficient_ring=QQ)
sage: from itertools import islice
sage: TestSuite(A).run( # not tested # long time # see #19424
....: verbose=True,
....: elements=tuple(islice(A.some_elements(), 10)),
....:     skip=('_test_some_elements', # too many elements
....: '_test_distributivity')) # due to cancellations: O(z) != O(z^2)
"""
# enable the category framework for elements
Element = AsymptoticExpansion
__default_prec__ = series_precision() # default default-precision
@staticmethod
def __classcall__(cls, growth_group=None, coefficient_ring=None,
names=None, category=None, default_prec=None):
r"""
Normalizes the input in order to ensure a unique
representation of the parent.
For more information see :class:`AsymptoticRing`.
EXAMPLES:
``__classcall__`` unifies the input to the constructor of
:class:`AsymptoticRing` such that the instances generated
are unique. Also, this enables the use of the generation
framework::
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: MG = GrowthGroup('x^ZZ')
sage: AR1 = AsymptoticRing(growth_group=MG, coefficient_ring=ZZ)
sage: AR2.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: AR1 is AR2
True
The bracket notation can only be used if the growth group
has a generator::
sage: AR.<lx> = AsymptoticRing(growth_group='log(x)^ZZ', coefficient_ring=ZZ)
Traceback (most recent call last):
...
ValueError: Growth Group log(x)^ZZ does not provide any
generators but name 'lx' given.
The names of the generators have to agree with the names used in
the growth group except for univariate rings::
sage: A.<icecream> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ); A
Asymptotic Ring <x^ZZ> over Integer Ring
sage: icecream
x
sage: A.<x, y> = AsymptoticRing(growth_group='x^ZZ * y^ZZ', coefficient_ring=ZZ); A
Asymptotic Ring <x^ZZ * y^ZZ> over Integer Ring
sage: A.<y, x> = AsymptoticRing(growth_group='x^ZZ * y^ZZ', coefficient_ring=ZZ)
Traceback (most recent call last):
...
ValueError: Names 'y', 'x' do not coincide with
generators 'x', 'y' of Growth Group x^ZZ * y^ZZ.
sage: A.<a, b> = AsymptoticRing(growth_group='x^ZZ * y^ZZ', coefficient_ring=ZZ)
Traceback (most recent call last):
...
ValueError: Names 'a', 'b' do not coincide with
generators 'x', 'y' of Growth Group x^ZZ * y^ZZ.
sage: A.<x, b> = AsymptoticRing(growth_group='x^ZZ * y^ZZ', coefficient_ring=ZZ)
Traceback (most recent call last):
...
ValueError: Names 'x', 'b' do not coincide with
generators 'x', 'y' of Growth Group x^ZZ * y^ZZ.
sage: A.<x> = AsymptoticRing(growth_group='x^ZZ * y^ZZ', coefficient_ring=ZZ)
Traceback (most recent call last):
...
ValueError: Name 'x' does not coincide with
generators 'x', 'y' of Growth Group x^ZZ * y^ZZ.
sage: A.<x, y, z> = AsymptoticRing(growth_group='x^ZZ * y^ZZ', coefficient_ring=ZZ)
Traceback (most recent call last):
...
ValueError: Names 'x', 'y', 'z' do not coincide with
generators 'x', 'y' of Growth Group x^ZZ * y^ZZ.
TESTS::
sage: AsymptoticRing(growth_group=None, coefficient_ring=ZZ)
Traceback (most recent call last):
...
ValueError: Growth group not specified. Cannot continue.
sage: AsymptoticRing(growth_group='x^ZZ', coefficient_ring=None)
Traceback (most recent call last):
...
ValueError: Coefficient ring not specified. Cannot continue.
sage: AsymptoticRing(growth_group='x^ZZ', coefficient_ring='icecream')
Traceback (most recent call last):
...
ValueError: icecream is not a ring. Cannot continue.
"""
from sage.categories.sets_cat import Sets
from sage.categories.rings import Rings
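# Strip off the class created by the category framework so that the
# unique representation is based on the plain AsymptoticRing class.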
Sets_parent_class = Sets().parent_class
while issubclass(cls, Sets_parent_class):
cls = cls.__base__
if isinstance(growth_group, str):
from .growth_group import GrowthGroup
growth_group = GrowthGroup(growth_group)
if growth_group is None:
raise ValueError('Growth group not specified. Cannot continue.')
if coefficient_ring is None:
raise ValueError('Coefficient ring not specified. Cannot continue.')
if coefficient_ring not in Rings():
raise ValueError('%s is not a ring. Cannot continue.' % (coefficient_ring,))
strgens = tuple(str(g) for g in growth_group.gens_monomial())
def format_names(N):
return ('s ' if len(N) != 1 else ' ') + ', '.join("'%s'" % n for n in N)
if names and not strgens:
raise ValueError('%s does not provide any generators but name%s given.' %
(growth_group, format_names(names)))
elif names is not None and len(names) == 1 and len(strgens) == 1:
pass
elif names is not None and names != strgens:
raise ValueError('Name%s %s not coincide with generator%s of %s.' %
(format_names(names),
'do' if len(names) != 1 else 'does',
format_names(strgens), growth_group))
if category is None:
from sage.categories.commutative_algebras import CommutativeAlgebras
from sage.categories.rings import Rings
category = CommutativeAlgebras(Rings())
if default_prec is None:
default_prec = cls.__default_prec__
return super(AsymptoticRing,
cls).__classcall__(cls, growth_group, coefficient_ring,
category=category,
default_prec=default_prec)
@experimental(trac_number=17601)
def __init__(self, growth_group, coefficient_ring, category, default_prec):
r"""
See :class:`AsymptoticRing` for more information.
TESTS::
sage: R1 = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ); R1
Asymptotic Ring <x^ZZ> over Integer Ring
sage: R2.<x> = AsymptoticRing(growth_group='x^QQ', coefficient_ring=QQ); R2
Asymptotic Ring <x^QQ> over Rational Field
sage: R1 is R2
False
::
sage: R3 = AsymptoticRing('x^ZZ')
Traceback (most recent call last):
...
ValueError: Coefficient ring not specified. Cannot continue.
"""
self._coefficient_ring_ = coefficient_ring
self._growth_group_ = growth_group
self._default_prec_ = default_prec
super(AsymptoticRing, self).__init__(base_ring=coefficient_ring,
category=category)
@property
def growth_group(self):
r"""
The growth group of this asymptotic ring.
EXAMPLES::
sage: AR = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: AR.growth_group
Growth Group x^ZZ
.. SEEALSO::
:doc:`growth_group`
"""
return self._growth_group_
@property
def coefficient_ring(self):
r"""
The coefficient ring of this asymptotic ring.
EXAMPLES::
sage: AR = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: AR.coefficient_ring
Integer Ring
"""
return self._coefficient_ring_
@property
def default_prec(self):
r"""
The default precision of this asymptotic ring.
This is the parameter used to determine how many summands
are kept before truncating an infinite series (which occurs,
for example, when inverting asymptotic expansions).
EXAMPLES::
sage: AR = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: AR.default_prec
20
sage: AR = AsymptoticRing('x^ZZ', ZZ, default_prec=123)
sage: AR.default_prec
123
"""
return self._default_prec_
def change_parameter(self, **kwds):
r"""
Return an asymptotic ring with a change in one or more of the given parameters.
INPUT:
- ``growth_group`` -- (default: ``None``) the new growth group.
- ``coefficient_ring`` -- (default: ``None``) the new coefficient ring.
- ``category`` -- (default: ``None``) the new category.
- ``default_prec`` -- (default: ``None``) the new default precision.
OUTPUT:
An asymptotic ring.
EXAMPLES::
sage: A = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: A.change_parameter(coefficient_ring=QQ)
Asymptotic Ring <x^ZZ> over Rational Field
TESTS::
sage: A.change_parameter(coefficient_ring=ZZ) is A
True
sage: A.change_parameter(coefficient_ring=None) is A
True
"""
parameters = ('growth_group', 'coefficient_ring', 'default_prec')
values = dict()
for parameter in parameters:
default = getattr(self, parameter)
values[parameter] = kwds.get(parameter, default)
if values[parameter] is None:
values[parameter] = default
values['category'] = self.category()
if isinstance(values['growth_group'], str):
from .growth_group import GrowthGroup
values['growth_group'] = GrowthGroup(values['growth_group'])
if all(values[parameter] is getattr(self, parameter)
for parameter in parameters) and values['category'] is self.category():
return self
from .misc import underlying_class
return underlying_class(self)(**values)
@staticmethod
def _create_empty_summands_():
r"""
Create an empty data structure suitable for storing and working
with summands.
INPUT:
Nothing.
OUTPUT:
A :class:`~sage.data_structures.mutable_poset.MutablePoset`.
TESTS::
sage: AsymptoticRing._create_empty_summands_()
poset()
"""
from sage.data_structures.mutable_poset import MutablePoset
from .term_monoid import can_absorb, absorption
return MutablePoset(key=lambda element: element.growth,
can_merge=can_absorb,
merge=absorption)
def _create_element_in_extension_(self, term, old_term_parent=None):
r"""
Create an element in an extension of this asymptotic ring which
is chosen according to the input.
INPUT:
- ``term`` -- the element data.
- ``old_term_parent`` -- the parent of ``term`` is compared to this
parent. If both are the same or ``old_term_parent`` is ``None``,
then the result is an expansion in this (``self``) asymptotic ring.
OUTPUT:
An element.
EXAMPLES::
sage: A = AsymptoticRing('z^ZZ', ZZ)
sage: a = next(A.an_element().summands.elements_topological())
sage: B = AsymptoticRing('z^QQ', QQ)
sage: b = next(B.an_element().summands.elements_topological())
sage: c = A._create_element_in_extension_(a, a.parent())
sage: next(c.summands.elements_topological()).parent()
O-Term Monoid z^ZZ with implicit coefficients in Integer Ring
sage: c = A._create_element_in_extension_(b, a.parent())
sage: next(c.summands.elements_topological()).parent()
O-Term Monoid z^QQ with implicit coefficients in Rational Field
TESTS::
sage: c = A._create_element_in_extension_(b, None)
sage: next(c.summands.elements_topological()).parent()
O-Term Monoid z^QQ with implicit coefficients in Rational Field
"""
if old_term_parent is None or term.parent() is old_term_parent:
parent = self
else:
# Insert an 'if' here once terms can have different
# coefficient rings, as will be the case for L-terms.
parent = self.change_parameter(
growth_group=term.parent().growth_group,
coefficient_ring=term.parent().coefficient_ring)
return parent(term, simplify=False, convert=False)
def _element_constructor_(self, data, simplify=True, convert=True):
r"""
Convert a given object to this asymptotic ring.
INPUT:
- ``data`` -- an object representing the element to be
initialized.
- ``simplify`` -- (default: ``True``) if set, then the constructed
element is simplified (terms are absorbed) automatically.
- ``convert`` -- (default: ``True``) passed on to the element
constructor. If set, then the ``summands`` are converted to
the asymptotic ring (the parent of this expansion). If not,
then the summands are taken as they are. In that case, the
caller must ensure that the parent of the terms is set
correctly.
OUTPUT:
An element of this asymptotic ring.
TESTS::
sage: AR = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: AR(5) # indirect doctest
5
sage: AR(3*x^2) # indirect doctest
3*x^2
sage: x = ZZ['x'].gen(); x.parent()
Univariate Polynomial Ring in x over Integer Ring
sage: AR(x)
x
sage: y = ZZ['y'].gen(); AR(y) # indirect doctest
Traceback (most recent call last):
...
ValueError: Polynomial y is not in
Asymptotic Ring <x^ZZ> over Integer Ring
> *previous* ValueError: Growth y is not in
Exact Term Monoid x^ZZ with coefficients in Integer Ring.
>> *previous* ValueError: y is not in Growth Group x^ZZ.
::
sage: A = AsymptoticRing(growth_group='p^ZZ', coefficient_ring=QQ)
sage: P.<p> = QQ[]
sage: A(p) # indirect doctest
p
sage: A(p^11) # indirect doctest
p^11
sage: A(2*p^11) # indirect doctest
2*p^11
sage: A(3*p^4 + 7/3*p - 8) # indirect doctest
3*p^4 + 7/3*p - 8
::
sage: S = AsymptoticRing(growth_group='x^ZZ * y^ZZ', coefficient_ring=QQ)
sage: var('x, y')
(x, y)
sage: S(x + y) # indirect doctest
x + y
sage: S(2*x - 4*x*y^6) # indirect doctest
-4*x*y^6 + 2*x
::
sage: A.<a,b> = AsymptoticRing('a^ZZ * b^ZZ', QQ)
sage: 1/a
a^(-1)
::
sage: P.<a, b, c> = ZZ[]
sage: A(a + b)
a + b
sage: A(a + c)
Traceback (most recent call last):
...
ValueError: Polynomial a + c is not in
Asymptotic Ring <a^ZZ * b^ZZ> over Rational Field
> *previous* ValueError: Growth c is not in
Exact Term Monoid a^ZZ * b^ZZ with coefficients in Rational Field.
>> *previous* ValueError: c is not in Growth Group a^ZZ * b^ZZ.
>...> *previous* ValueError: c is not in any of the factors of
Growth Group a^ZZ * b^ZZ
::
sage: M = AsymptoticRing('m^ZZ', ZZ)
sage: N = AsymptoticRing('n^ZZ', QQ)
sage: N(M.an_element()) # indirect doctest
Traceback (most recent call last):
...
ValueError: Cannot include m^3 with parent
Exact Term Monoid m^ZZ with coefficients in Integer Ring
in Asymptotic Ring <n^ZZ> over Rational Field
> *previous* ValueError: m^3 is not in Growth Group n^ZZ
::
sage: M([1]) # indirect doctest
Traceback (most recent call last):
...
TypeError: Not all list entries of [1] are asymptotic terms,
so cannot create an asymptotic expansion in
Asymptotic Ring <m^ZZ> over Integer Ring.
sage: M(SR.var('a') + 1) # indirect doctest
Traceback (most recent call last):
...
ValueError: Symbolic expression a + 1 is not in
Asymptotic Ring <m^ZZ> over Integer Ring.
> *previous* ValueError: a is not in
Exact Term Monoid m^ZZ with coefficients in Integer Ring.
>> *previous* ValueError: Factor a of a is neither a coefficient
(in Integer Ring) nor growth (in Growth Group m^ZZ).
"""
from sage.data_structures.mutable_poset import MutablePoset
if isinstance(data, MutablePoset):
return self.element_class(self, data, simplify=simplify, convert=convert)
if type(data) == self.element_class and data.parent() == self:
return data
if isinstance(data, AsymptoticExpansion):
return self.element_class(self, data.summands,
simplify=simplify, convert=convert)
from .term_monoid import GenericTerm
if isinstance(data, GenericTerm):
data = (data,)
if isinstance(data, (list, tuple)):
if not all(isinstance(elem, GenericTerm) for elem in data):
raise TypeError('Not all list entries of %s '
'are asymptotic terms, so cannot create an '
'asymptotic expansion in %s.' % (data, self))
summands = AsymptoticRing._create_empty_summands_()
summands.union_update(data)
return self.element_class(self, summands,
simplify=simplify, convert=convert)
if not data:
summands = AsymptoticRing._create_empty_summands_()
return self.element_class(self, summands,
simplify=simplify, convert=False)
try:
P = data.parent()
except AttributeError:
return self.create_summand('exact', data)
from .misc import combine_exceptions
from sage.symbolic.ring import SymbolicRing
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.polynomial.multi_polynomial_ring_generic import is_MPolynomialRing
from sage.rings.power_series_ring import is_PowerSeriesRing
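# Dispatch on the parent of the input: symbolic sums, (multivariate)
# polynomials and power series are converted summand by summand.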
if isinstance(P, SymbolicRing):
from sage.symbolic.operators import add_vararg
if data.operator() == add_vararg:
summands = []
for summand in data.operands():
# TODO: check if summand is an O-Term here
# (see #19425, #19426)
try:
summands.append(self.create_summand('exact', summand))
except ValueError as e:
raise combine_exceptions(
ValueError('Symbolic expression %s is not in %s.' %
(data, self)), e)
return sum(summands, self.zero())
elif is_PolynomialRing(P):
p = P.gen()
try:
return sum(iter(self.create_summand('exact', growth=p**i,
coefficient=c)
for i, c in enumerate(data)),
self.zero())
except ValueError as e:
raise combine_exceptions(
ValueError('Polynomial %s is not in %s' % (data, self)), e)
elif is_MPolynomialRing(P):
try:
return sum(iter(self.create_summand('exact', growth=g, coefficient=c)
for c, g in iter(data)),
self.zero())
except ValueError as e:
raise combine_exceptions(
ValueError('Polynomial %s is not in %s' % (data, self)), e)
elif is_PowerSeriesRing(P):
raise NotImplementedError(
'Cannot convert %s from the %s to an asymptotic expansion '
'in %s, since growths at other points than +oo are not yet '
'supported.' % (data, P, self))
# Delete the lines above as soon as we can deal with
# growths at points other than +oo.
p = P.gen()
try:
result = self(data.polynomial())
except ValueError as e:
raise combine_exceptions(
ValueError('Powerseries %s is not in %s' % (data, self)), e)
prec = data.precision_absolute()
if prec < sage.rings.infinity.PlusInfinity():
try:
result += self.create_summand('O', growth=p**prec)
except ValueError as e:
raise combine_exceptions(
ValueError('Powerseries %s is not in %s' %
(data, self)), e)
return result
return self.create_summand('exact', data)
def _coerce_map_from_(self, R):
r"""
Return whether ``R`` coerces into this asymptotic ring.
INPUT:
- ``R`` -- a parent.
OUTPUT:
A boolean.
.. NOTE::
There are two possible cases: either ``R`` coerces in the
:meth:`coefficient_ring` of this asymptotic ring, or ``R``
itself is an asymptotic ring, where both the
:meth:`growth_group` and the :meth:`coefficient_ring` coerce into
the :meth:`growth_group` and the :meth:`coefficient_ring` of this
asymptotic ring, respectively.
TESTS::
sage: AR_ZZ = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ); AR_ZZ
Asymptotic Ring <x^ZZ> over Integer Ring
sage: x_ZZ = AR_ZZ.gen()
sage: AR_QQ = AsymptoticRing(growth_group='x^QQ', coefficient_ring=QQ); AR_QQ
Asymptotic Ring <x^QQ> over Rational Field
sage: x_QQ = AR_QQ.gen()
sage: AR_QQ.has_coerce_map_from(AR_ZZ) # indirect doctest
True
sage: x_ZZ * x_QQ
x^2
::
sage: AR_QQ.has_coerce_map_from(QQ)
True
sage: AR_QQ.has_coerce_map_from(ZZ)
True
"""
from sage.data_structures.mutable_poset import MutablePoset
if R == MutablePoset:
return
if self.coefficient_ring.has_coerce_map_from(R):
return True
if self.growth_group.has_coerce_map_from(R):
return True
elif isinstance(R, AsymptoticRing):
if self.growth_group.has_coerce_map_from(R.growth_group) and \
self.coefficient_ring.has_coerce_map_from(R.coefficient_ring):
return True
def _repr_(self):
r"""
A representation string of this asymptotic ring.
INPUT:
Nothing.
OUTPUT:
A string.
EXAMPLES::
sage: AR = AsymptoticRing(growth_group='x^ZZ',
....: coefficient_ring=ZZ)
sage: repr(AR) # indirect doctest
'Asymptotic Ring <x^ZZ> over Integer Ring'
"""
try:
G = '<' + self.growth_group._repr_(condense=True) + '>'
except TypeError:
G = repr(self.growth_group)
return 'Asymptotic Ring %s over %s' % (G, self.coefficient_ring)
def _an_element_(self):
r"""
Return an element of this asymptotic ring.
INPUT:
Nothing.
OUTPUT:
An :class:`AsymptoticExpansion`.
EXAMPLES::
sage: AsymptoticRing(growth_group='z^QQ', coefficient_ring=ZZ).an_element()
z^(3/2) + O(z^(1/2))
sage: AsymptoticRing(growth_group='z^ZZ', coefficient_ring=QQ).an_element()
1/8*z^3 + O(z)
sage: AsymptoticRing(growth_group='z^QQ', coefficient_ring=QQ).an_element()
1/8*z^(3/2) + O(z^(1/2))
"""
from .term_monoid import TermMonoid
E = TermMonoid('exact', asymptotic_ring=self)
O = TermMonoid('O', asymptotic_ring=self)
return self(E.an_element(), simplify=False, convert=False)**3 + \
self(O.an_element(), simplify=False, convert=False)
def some_elements(self):
r"""
Return some elements of this asymptotic ring.
See :class:`TestSuite` for a typical use case.
INPUT:
Nothing.
OUTPUT:
An iterator.
EXAMPLES::
sage: from itertools import islice
sage: A = AsymptoticRing(growth_group='z^QQ', coefficient_ring=ZZ)
sage: tuple(islice(A.some_elements(), 10))
(z^(3/2) + O(z^(1/2)),
O(z^(1/2)),
z^(3/2) + O(z^(-1/2)),
-z^(3/2) + O(z^(1/2)),
O(z^(-1/2)),
O(z^2),
z^6 + O(z^(1/2)),
-z^(3/2) + O(z^(-1/2)),
O(z^2),
z^(3/2) + O(z^(-2)))
"""
from sage.misc.mrange import cantor_product
from .term_monoid import TermMonoid
E = TermMonoid('exact', asymptotic_ring=self)
O = TermMonoid('O', asymptotic_ring=self)
return iter(self(e, simplify=False, convert=False)**3 +
self(o, simplify=False, convert=False)
for e, o in cantor_product(
E.some_elements(), O.some_elements()))
def gens(self):
r"""
Return a tuple with generators of this asymptotic ring.
INPUT:
Nothing.
OUTPUT:
A tuple of asymptotic expansions.
.. NOTE::
Generators do not necessarily exist. This depends on the
underlying growth group. For example,
:class:`monomial growth groups <sage.rings.asymptotic.growth_group.MonomialGrowthGroup>`
have a generator, and exponential growth groups
do not.
EXAMPLES::
sage: AR.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: AR.gens()
(x,)
sage: B.<y,z> = AsymptoticRing(growth_group='y^ZZ * z^ZZ', coefficient_ring=QQ)
sage: B.gens()
(y, z)
"""
return tuple(self.create_summand('exact',
growth=g,
coefficient=self.coefficient_ring(1))
for g in self.growth_group.gens_monomial())
def gen(self, n=0):
r"""
Return the ``n``-th generator of this asymptotic ring.
INPUT:
- ``n`` -- (default: `0`) a non-negative integer.
OUTPUT:
An asymptotic expansion.
EXAMPLES::
sage: R.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: R.gen()
x
"""
return self.gens()[n]
def ngens(self):
r"""
Return the number of generators of this asymptotic ring.
INPUT:
Nothing.
OUTPUT:
An integer.
EXAMPLES::
sage: AR.<x> = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: AR.ngens()
1
"""
return len(self.growth_group.gens_monomial())
def coefficients_of_generating_function(self, function, singularities, precision=None,
return_singular_expansions=False):
r"""
Return the asymptotic growth of the coefficients of some
generating function by means of Singularity Analysis.
INPUT:
- ``function`` -- a callable function in one variable.
- ``singularities`` -- list of dominant singularities of the function.
- ``precision`` -- (default: ``None``) an integer. If ``None``, then
the default precision of the asymptotic ring is used.
- ``return_singular_expansions`` -- (default: ``False``) a boolean.
If set, the singular expansions are also returned.
OUTPUT:
- If ``return_singular_expansions=False``: An asymptotic expansion from
this ring.
- If ``return_singular_expansions=True``: A named tuple with
components ``asymptotic_expansion`` and
``singular_expansions``. The former contains an asymptotic
expansion from this ring, the latter is a dictionary which
contains the singular expansions around the singularities.
.. TODO::
Make this method more usable by implementing the
processing of symbolic expressions.
EXAMPLES:
Catalan numbers::
sage: def catalan(z):
....: return (1-(1-4*z)^(1/2))/(2*z)
sage: B.<n> = AsymptoticRing('QQ^n * n^QQ', QQ)
sage: B.coefficients_of_generating_function(catalan, (1/4,), precision=3)
1/sqrt(pi)*4^n*n^(-3/2) - 9/8/sqrt(pi)*4^n*n^(-5/2)
+ 145/128/sqrt(pi)*4^n*n^(-7/2) + O(4^n*n^(-4))
sage: B.coefficients_of_generating_function(catalan, (1/4,), precision=2,
....: return_singular_expansions=True)
SingularityAnalysisResult(asymptotic_expansion=1/sqrt(pi)*4^n*n^(-3/2)
- 9/8/sqrt(pi)*4^n*n^(-5/2) + O(4^n*n^(-3)),
singular_expansions={1/4: 2 - 2*T^(-1/2)
+ 2*T^(-1) - 2*T^(-3/2) + O(T^(-2))})
Unit fractions::
sage: def logarithmic(z):
....: return -log(1-z)
sage: B.coefficients_of_generating_function(logarithmic, (1,), precision=5)
n^(-1) + O(n^(-3))
Harmonic numbers::
sage: def harmonic(z):
....: return -log(1-z)/(1-z)
sage: B.<n> = AsymptoticRing('QQ^n * n^QQ * log(n)^QQ', QQ)
sage: ex = B.coefficients_of_generating_function(harmonic, (1,), precision=13); ex
log(n) + euler_gamma + 1/2*n^(-1) - 1/12*n^(-2) + 1/120*n^(-4)
+ O(n^(-6))
sage: ex.has_same_summands(asymptotic_expansions.HarmonicNumber(
....: 'n', precision=5))
True
.. WARNING::
Once singular expansions around points other than infinity
are implemented (:trac:`20050`), the output in the case
``return_singular_expansions`` will change to return singular
expansions around the singularities.
TESTS::
sage: def f(z):
....: return z/(1-z)
sage: B.coefficients_of_generating_function(f, (1,), precision=3)
Traceback (most recent call last):
...
NotImplementedOZero: The error term in the result is O(0)
which means 0 for sufficiently large n.
"""
from sage.symbolic.ring import SR
from .misc import NotImplementedOZero
singular_expansions = {}
OZeroEncountered = False
A = AsymptoticRing('T^QQ * log(T)^QQ', coefficient_ring=SR,
default_prec=precision)
T = A.gen()
result = A.zero()
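# Expand the function around each singularity zeta by substituting
# (1 - 1/T)*zeta, then transfer the singular expansion to an
# asymptotic expansion of the coefficients via singularity analysis.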
for singularity in singularities:
singular_expansion = A(function((1-1/T)*singularity))
singular_expansions[singularity] = singular_expansion
try:
contribution = singular_expansion._singularity_analysis_(
var='Z', zeta=singularity,
precision=precision).subs(Z=self.gen())
except NotImplementedOZero:
OZeroEncountered = True
else:
result += contribution
if OZeroEncountered and result.is_exact():
raise NotImplementedOZero(self)
if return_singular_expansions:
from collections import namedtuple
SingularityAnalysisResult = namedtuple(
'SingularityAnalysisResult',
['asymptotic_expansion', 'singular_expansions'])
return SingularityAnalysisResult(
asymptotic_expansion=result,
singular_expansions=singular_expansions)
else:
return result
def create_summand(self, type, data=None, **kwds):
r"""
Create a simple asymptotic expansion consisting of a single
summand.
INPUT:
- ``type`` -- 'O' or 'exact'.
- ``data`` -- the element out of which a summand has to be created.
- ``growth`` -- an element of the :meth:`growth_group`.
- ``coefficient`` -- an element of the :meth:`coefficient_ring`.
.. NOTE::
Either ``growth`` and ``coefficient`` or ``data`` have to
be specified.
OUTPUT:
An asymptotic expansion.
.. NOTE::
This method calls the factory :class:`TermMonoid
<sage.rings.asymptotic.term_monoid.TermMonoidFactory>`
with the appropriate arguments.
EXAMPLES::
sage: R = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ)
sage: R.create_summand('O', x^2)
O(x^2)
sage: R.create_summand('exact', growth=x^456, coefficient=123)
123*x^456
sage: R.create_summand('exact', data=12*x^13)
12*x^13
TESTS::
sage: R.create_summand('exact', data='12*x^13')
12*x^13
sage: R.create_summand('exact', data='x^13 * 12')
12*x^13
sage: R.create_summand('exact', data='x^13')
x^13
sage: R.create_summand('exact', data='12')
12
sage: R.create_summand('exact', data=12)
12
::
sage: Z = R.change_parameter(coefficient_ring=Zmod(3))
sage: Z.create_summand('exact', data=42)
0
::
sage: R.create_summand('O', growth=42*x^2, coefficient=1)
Traceback (most recent call last):
...
ValueError: Growth 42*x^2 is not in O-Term Monoid x^ZZ with implicit coefficients in Integer Ring.
> *previous* ValueError: 42*x^2 is not in Growth Group x^ZZ.
::
sage: AR.<z> = AsymptoticRing('z^QQ', QQ)
sage: AR.create_summand('exact', growth='z^2')
Traceback (most recent call last):
...
TypeError: Cannot create exact term: only 'growth' but
no 'coefficient' specified.
"""
from .term_monoid import TermMonoid, ZeroCoefficientError
TM = TermMonoid(type, asymptotic_ring=self)
if data is None:
try:
data = kwds.pop('growth')
except KeyError:
raise TypeError("Neither 'data' nor 'growth' are specified.")
if type == 'exact' and kwds.get('coefficient') is None:
raise TypeError("Cannot create exact term: only 'growth' "
"but no 'coefficient' specified.")
try:
return self(TM(data, **kwds), simplify=False, convert=False)
except ZeroCoefficientError:
return self.zero()
def variable_names(self):
r"""
Return the names of the variables.
OUTPUT:
A tuple of strings.
EXAMPLES::
sage: A = AsymptoticRing(growth_group='x^ZZ * QQ^y', coefficient_ring=QQ)
sage: A.variable_names()
('x', 'y')
"""
return self.growth_group.variable_names()
def construction(self):
r"""
Return the construction of this asymptotic ring.
OUTPUT:
A pair whose first entry is an
:class:`asymptotic ring construction functor <AsymptoticRingFunctor>`
and its second entry the coefficient ring.
EXAMPLES::
sage: A = AsymptoticRing(growth_group='x^ZZ * QQ^y', coefficient_ring=QQ)
sage: A.construction()
(AsymptoticRing<x^ZZ * QQ^y>, Rational Field)
.. SEEALSO::
:doc:`asymptotic_ring`,
:class:`AsymptoticRing`,
:class:`AsymptoticRingFunctor`.
"""
return AsymptoticRingFunctor(self.growth_group), self.coefficient_ring
from sage.categories.pushout import ConstructionFunctor
class AsymptoticRingFunctor(ConstructionFunctor):
r"""
A :class:`construction functor <sage.categories.pushout.ConstructionFunctor>`
for :class:`asymptotic rings <AsymptoticRing>`.
INPUT:
- ``growth_group`` -- a partially ordered group (see
:class:`AsymptoticRing` or
:doc:`growth_group` for details).
EXAMPLES::
sage: AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ).construction() # indirect doctest
(AsymptoticRing<x^ZZ>, Rational Field)
.. SEEALSO::
:doc:`asymptotic_ring`,
:class:`AsymptoticRing`,
:class:`sage.rings.asymptotic.growth_group.AbstractGrowthGroupFunctor`,
:class:`sage.rings.asymptotic.growth_group.ExponentialGrowthGroupFunctor`,
:class:`sage.rings.asymptotic.growth_group.MonomialGrowthGroupFunctor`,
:class:`sage.categories.pushout.ConstructionFunctor`.
TESTS::
sage: X = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ)
sage: Y = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=QQ)
sage: cm = sage.structure.element.get_coercion_model()
sage: cm.record_exceptions()
sage: cm.common_parent(X, Y)
Asymptotic Ring <x^ZZ * y^ZZ> over Rational Field
sage: sage.structure.element.coercion_traceback() # not tested
::
sage: from sage.categories.pushout import pushout
sage: pushout(AsymptoticRing(growth_group='x^ZZ', coefficient_ring=ZZ), QQ)
Asymptotic Ring <x^ZZ> over Rational Field
"""
rank = 13
def __init__(self, growth_group):
r"""
See :class:`AsymptoticRingFunctor` for details.
TESTS::
sage: from sage.rings.asymptotic.asymptotic_ring import AsymptoticRingFunctor
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: AsymptoticRingFunctor(GrowthGroup('x^ZZ'))
AsymptoticRing<x^ZZ>
"""
self.growth_group = growth_group
from sage.categories.rings import Rings
super(ConstructionFunctor, self).__init__(
Rings(), Rings())
def _repr_(self):
r"""
Return a representation string of this functor.
OUTPUT:
A string.
TESTS::
sage: AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ).construction()[0] # indirect doctest
AsymptoticRing<x^ZZ>
"""
return 'AsymptoticRing<%s>' % (self.growth_group._repr_(condense=True),)
def _apply_functor(self, coefficient_ring):
r"""
Apply this functor to the given ``coefficient_ring``.
INPUT:
- ``coefficient_ring`` -- a ring; the coefficient ring of the
asymptotic ring to be constructed.
OUTPUT:
An :class:`AsymptoticRing`.
EXAMPLES::
sage: A = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ)
sage: F, C = A.construction()
sage: F(C) # indirect doctest
Asymptotic Ring <x^ZZ> over Rational Field
"""
return AsymptoticRing(growth_group=self.growth_group,
coefficient_ring=coefficient_ring)
def merge(self, other):
r"""
Merge this functor with ``other`` if possible.
INPUT:
- ``other`` -- a functor.
OUTPUT:
A functor or ``None``.
EXAMPLES::
sage: X = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ)
sage: Y = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=QQ)
sage: F_X = X.construction()[0]
sage: F_Y = Y.construction()[0]
sage: F_X.merge(F_X)
AsymptoticRing<x^ZZ>
sage: F_X.merge(F_Y)
AsymptoticRing<x^ZZ * y^ZZ>
"""
if self == other:
return self
if isinstance(other, AsymptoticRingFunctor):
from sage.structure.element import get_coercion_model
cm = get_coercion_model()
try:
G = cm.common_parent(self.growth_group, other.growth_group)
except TypeError:
pass
else:
return AsymptoticRingFunctor(G)
def __eq__(self, other):
r"""
Return whether this functor is equal to ``other``.
INPUT:
- ``other`` -- a functor.
OUTPUT:
A boolean.
EXAMPLES::
sage: X = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ)
sage: Y = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=QQ)
sage: F_X = X.construction()[0]
sage: F_Y = Y.construction()[0]
sage: F_X == F_X
True
sage: F_X == F_Y
False
"""
return type(self) == type(other) and \
self.growth_group == other.growth_group
def __ne__(self, other):
r"""
Return whether this functor is not equal to ``other``.
INPUT:
- ``other`` -- a functor.
OUTPUT:
A boolean.
EXAMPLES::
sage: X = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ)
sage: Y = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=QQ)
sage: F_X = X.construction()[0]
sage: F_Y = Y.construction()[0]
sage: F_X != F_X
False
sage: F_X != F_Y
True
"""
return not self == other
def format_names(N):
return ('s ' if len(N) != 1 else ' ') + ', '.join("'%s'" % n for n in N)
if names and not strgens:
raise ValueError('%s does not provide any generators but name%s given.' %
(growth_group, format_names(names)))
elif names is not None and len(names) == 1 and len(strgens) == 1:
pass
elif names is not None and names != strgens:
raise ValueError('Name%s do not coincide with generator%s of %s.' %
(format_names(names), format_names(strgens), growth_group))
if category is None:
from sage.categories.commutative_algebras import CommutativeAlgebras
from sage.categories.rings import Rings
category = CommutativeAlgebras(Rings())
if default_prec is None:
default_prec = cls.__default_prec__
return super(AsymptoticRing,
cls).__classcall__(cls, growth_group, coefficient_ring,
category=category,
default_prec=default_prec)
@experimental(trac_number=17601)
def __init__(self, growth_group, coefficient_ring, category, default_prec):
self._coefficient_ring_ = coefficient_ring
self._growth_group_ = growth_group
self._default_prec_ = default_prec
super(AsymptoticRing, self).__init__(base_ring=coefficient_ring,
category=category)
@property
def growth_group(self):
return self._growth_group_
@property
def coefficient_ring(self):
return self._coefficient_ring_
@property
def default_prec(self):
return self._default_prec_
def change_parameter(self, **kwds):
parameters = ('growth_group', 'coefficient_ring', 'default_prec')
values = dict()
for parameter in parameters:
default = getattr(self, parameter)
values[parameter] = kwds.get(parameter, default)
if values[parameter] is None:
values[parameter] = default
values['category'] = self.category()
if isinstance(values['growth_group'], str):
from .growth_group import GrowthGroup
values['growth_group'] = GrowthGroup(values['growth_group'])
if all(values[parameter] is getattr(self, parameter)
for parameter in parameters) and values['category'] is self.category():
return self
from .misc import underlying_class
return underlying_class(self)(**values)
@staticmethod
def _create_empty_summands_():
from sage.data_structures.mutable_poset import MutablePoset
from .term_monoid import can_absorb, absorption
return MutablePoset(key=lambda element: element.growth,
can_merge=can_absorb,
merge=absorption)
def _create_element_in_extension_(self, term, old_term_parent=None):
if old_term_parent is None or term.parent() is old_term_parent:
parent = self
else:
parent = self.change_parameter(
growth_group=term.parent().growth_group,
coefficient_ring=term.parent().coefficient_ring)
return parent(term, simplify=False, convert=False)
def _element_constructor_(self, data, simplify=True, convert=True):
from sage.data_structures.mutable_poset import MutablePoset
if isinstance(data, MutablePoset):
return self.element_class(self, data, simplify=simplify, convert=convert)
if type(data) == self.element_class and data.parent() == self:
return data
if isinstance(data, AsymptoticExpansion):
return self.element_class(self, data.summands,
simplify=simplify, convert=convert)
from .term_monoid import GenericTerm
if isinstance(data, GenericTerm):
data = (data,)
if isinstance(data, (list, tuple)):
if not all(isinstance(elem, GenericTerm) for elem in data):
raise TypeError('Not all list entries of %s '
'are asymptotic terms, so cannot create an '
'asymptotic expansion in %s.' % (data, self))
summands = AsymptoticRing._create_empty_summands_()
summands.union_update(data)
return self.element_class(self, summands,
simplify=simplify, convert=convert)
if not data:
summands = AsymptoticRing._create_empty_summands_()
return self.element_class(self, summands,
simplify=simplify, convert=False)
try:
P = data.parent()
except AttributeError:
return self.create_summand('exact', data)
from .misc import combine_exceptions
from sage.symbolic.ring import SymbolicRing
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.polynomial.multi_polynomial_ring_generic import is_MPolynomialRing
from sage.rings.power_series_ring import is_PowerSeriesRing
if isinstance(P, SymbolicRing):
from sage.symbolic.operators import add_vararg
if data.operator() == add_vararg:
summands = []
for summand in data.operands():
                    try:
summands.append(self.create_summand('exact', summand))
except ValueError as e:
raise combine_exceptions(
ValueError('Symbolic expression %s is not in %s.' %
(data, self)), e)
return sum(summands, self.zero())
elif is_PolynomialRing(P):
p = P.gen()
try:
return sum(iter(self.create_summand('exact', growth=p**i,
coefficient=c)
for i, c in enumerate(data)),
self.zero())
except ValueError as e:
raise combine_exceptions(
ValueError('Polynomial %s is not in %s' % (data, self)), e)
elif is_MPolynomialRing(P):
try:
return sum(iter(self.create_summand('exact', growth=g, coefficient=c)
for c, g in iter(data)),
self.zero())
except ValueError as e:
raise combine_exceptions(
ValueError('Polynomial %s is not in %s' % (data, self)), e)
elif is_PowerSeriesRing(P):
raise NotImplementedError(
'Cannot convert %s from the %s to an asymptotic expansion '
'in %s, since growths at other points than +oo are not yet '
'supported.' % (data, P, self))
            # The remainder of this branch is currently unreachable; it is
            # kept for when growths at points other than +oo are supported.
            p = P.gen()
try:
result = self(data.polynomial())
except ValueError as e:
raise combine_exceptions(
ValueError('Powerseries %s is not in %s' % (data, self)), e)
prec = data.precision_absolute()
if prec < sage.rings.infinity.PlusInfinity():
try:
result += self.create_summand('O', growth=p**prec)
except ValueError as e:
raise combine_exceptions(
ValueError('Powerseries %s is not in %s' %
(data, self)), e)
return result
return self.create_summand('exact', data)
def _coerce_map_from_(self, R):
from sage.data_structures.mutable_poset import MutablePoset
if R == MutablePoset:
return
if self.coefficient_ring.has_coerce_map_from(R):
return True
if self.growth_group.has_coerce_map_from(R):
return True
elif isinstance(R, AsymptoticRing):
if self.growth_group.has_coerce_map_from(R.growth_group) and \
self.coefficient_ring.has_coerce_map_from(R.coefficient_ring):
return True
def _repr_(self):
try:
G = '<' + self.growth_group._repr_(condense=True) + '>'
except TypeError:
G = repr(self.growth_group)
return 'Asymptotic Ring %s over %s' % (G, self.coefficient_ring)
def _an_element_(self):
from .term_monoid import TermMonoid
E = TermMonoid('exact', asymptotic_ring=self)
O = TermMonoid('O', asymptotic_ring=self)
return self(E.an_element(), simplify=False, convert=False)**3 + \
self(O.an_element(), simplify=False, convert=False)
def some_elements(self):
from sage.misc.mrange import cantor_product
from .term_monoid import TermMonoid
E = TermMonoid('exact', asymptotic_ring=self)
O = TermMonoid('O', asymptotic_ring=self)
return iter(self(e, simplify=False, convert=False)**3 +
self(o, simplify=False, convert=False)
for e, o in cantor_product(
E.some_elements(), O.some_elements()))
def gens(self):
return tuple(self.create_summand('exact',
growth=g,
coefficient=self.coefficient_ring(1))
for g in self.growth_group.gens_monomial())
def gen(self, n=0):
return self.gens()[n]
def ngens(self):
return len(self.growth_group.gens_monomial())
def coefficients_of_generating_function(self, function, singularities, precision=None,
return_singular_expansions=False):
from sage.symbolic.ring import SR
from .misc import NotImplementedOZero
singular_expansions = {}
OZeroEncountered = False
A = AsymptoticRing('T^QQ * log(T)^QQ', coefficient_ring=SR,
default_prec=precision)
T = A.gen()
result = A.zero()
for singularity in singularities:
singular_expansion = A(function((1-1/T)*singularity))
singular_expansions[singularity] = singular_expansion
try:
contribution = singular_expansion._singularity_analysis_(
var='Z', zeta=singularity,
precision=precision).subs(Z=self.gen())
except NotImplementedOZero:
OZeroEncountered = True
else:
result += contribution
if OZeroEncountered and result.is_exact():
raise NotImplementedOZero(self)
if return_singular_expansions:
from collections import namedtuple
SingularityAnalysisResult = namedtuple(
'SingularityAnalysisResult',
['asymptotic_expansion', 'singular_expansions'])
return SingularityAnalysisResult(
asymptotic_expansion=result,
singular_expansions=singular_expansions)
else:
return result
def create_summand(self, type, data=None, **kwds):
from .term_monoid import TermMonoid, ZeroCoefficientError
TM = TermMonoid(type, asymptotic_ring=self)
if data is None:
try:
data = kwds.pop('growth')
except KeyError:
raise TypeError("Neither 'data' nor 'growth' are specified.")
if type == 'exact' and kwds.get('coefficient') is None:
raise TypeError("Cannot create exact term: only 'growth' "
"but no 'coefficient' specified.")
try:
return self(TM(data, **kwds), simplify=False, convert=False)
except ZeroCoefficientError:
return self.zero()
def variable_names(self):
return self.growth_group.variable_names()
def construction(self):
return AsymptoticRingFunctor(self.growth_group), self.coefficient_ring
from sage.categories.pushout import ConstructionFunctor
class AsymptoticRingFunctor(ConstructionFunctor):
rank = 13
def __init__(self, growth_group):
self.growth_group = growth_group
from sage.categories.rings import Rings
super(ConstructionFunctor, self).__init__(
Rings(), Rings())
def _repr_(self):
return 'AsymptoticRing<%s>' % (self.growth_group._repr_(condense=True),)
def _apply_functor(self, coefficient_ring):
return AsymptoticRing(growth_group=self.growth_group,
coefficient_ring=coefficient_ring)
def merge(self, other):
if self == other:
return self
if isinstance(other, AsymptoticRingFunctor):
from sage.structure.element import get_coercion_model
cm = get_coercion_model()
try:
G = cm.common_parent(self.growth_group, other.growth_group)
except TypeError:
pass
else:
return AsymptoticRingFunctor(G)
def __eq__(self, other):
return type(self) == type(other) and \
self.growth_group == other.growth_group
def __ne__(self, other):
return not self == other
| true
| true
|
1c415974dbb17efd80c002344c32610098e1e9c0
| 386
|
py
|
Python
|
tests/test_comments.py
|
derrokip34/Blog
|
ec6d6acb0c69ea42a8ea99c6836d943ad7417984
|
[
"MIT"
] | null | null | null |
tests/test_comments.py
|
derrokip34/Blog
|
ec6d6acb0c69ea42a8ea99c6836d943ad7417984
|
[
"MIT"
] | null | null | null |
tests/test_comments.py
|
derrokip34/Blog
|
ec6d6acb0c69ea42a8ea99c6836d943ad7417984
|
[
"MIT"
] | null | null | null |
import unittest
from app.models import Comments
class CommentTest(unittest.TestCase):
def setUp(self):
self.new_comment = Comments(comment="Great Idea")
def test_instance(self):
self.assertTrue(isinstance(self.new_comment,Comments))
def test_save_comment(self):
self.new_comment.save_comment()
self.assertTrue(len(Comments.query.all())>0)
| 27.571429
| 62
| 0.715026
|
import unittest
from app.models import Comments
class CommentTest(unittest.TestCase):
def setUp(self):
self.new_comment = Comments(comment="Great Idea")
def test_instance(self):
self.assertTrue(isinstance(self.new_comment,Comments))
def test_save_comment(self):
self.new_comment.save_comment()
self.assertTrue(len(Comments.query.all())>0)
| true
| true
|
1c415b018bc2ea14b606ce70a1d3704a8e03c347
| 14,497
|
bzl
|
Python
|
third_party/systemlibs/protobuf.bzl
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 26
|
2019-11-10T15:33:34.000Z
|
2022-03-24T19:56:57.000Z
|
third_party/systemlibs/protobuf.bzl
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 6
|
2022-01-15T07:17:47.000Z
|
2022-02-14T15:28:22.000Z
|
third_party/systemlibs/protobuf.bzl
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 6
|
2020-03-29T11:10:53.000Z
|
2021-06-14T05:39:14.000Z
|
def _GetPath(ctx, path):
if ctx.label.workspace_root:
return ctx.label.workspace_root + "/" + path
else:
return path
def _IsNewExternal(ctx):
# Bazel 0.4.4 and older have genfiles paths that look like:
# bazel-out/local-fastbuild/genfiles/external/repo/foo
# After the exec root rearrangement, they look like:
# ../repo/bazel-out/local-fastbuild/genfiles/foo
return ctx.label.workspace_root.startswith("../")
def _GenDir(ctx):
if _IsNewExternal(ctx):
# We are using the fact that Bazel 0.4.4+ provides repository-relative paths
# for ctx.genfiles_dir.
return ctx.genfiles_dir.path + (
"/" + ctx.attr.includes[0] if ctx.attr.includes and ctx.attr.includes[0] else ""
)
# This means that we're either in the old version OR the new version in the local repo.
# Either way, appending the source path to the genfiles dir works.
return ctx.var["GENDIR"] + "/" + _SourceDir(ctx)
def _SourceDir(ctx):
if not ctx.attr.includes:
return ctx.label.workspace_root
if not ctx.attr.includes[0]:
return _GetPath(ctx, ctx.label.package)
if not ctx.label.package:
return _GetPath(ctx, ctx.attr.includes[0])
return _GetPath(ctx, ctx.label.package + "/" + ctx.attr.includes[0])
def _CcHdrs(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + ".pb.h" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + ".grpc.pb.h" for s in srcs]
return ret
def _CcSrcs(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + ".pb.cc" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + ".grpc.pb.cc" for s in srcs]
return ret
def _CcOuts(srcs, use_grpc_plugin = False):
return _CcHdrs(srcs, use_grpc_plugin) + _CcSrcs(srcs, use_grpc_plugin)
def _PyOuts(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + "_pb2.py" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + "_pb2_grpc.py" for s in srcs]
return ret
def _RelativeOutputPath(path, include, dest = ""):
if include == None:
return path
if not path.startswith(include):
fail("Include path %s isn't part of the path %s." % (include, path))
if include and include[-1] != "/":
include = include + "/"
if dest and dest[-1] != "/":
dest = dest + "/"
path = path[len(include):]
return dest + path
def _proto_gen_impl(ctx):
"""General implementation for generating protos"""
srcs = ctx.files.srcs
deps = []
deps += ctx.files.srcs
source_dir = _SourceDir(ctx)
gen_dir = _GenDir(ctx)
if source_dir:
import_flags = ["-I" + source_dir, "-I" + gen_dir]
else:
import_flags = ["-I."]
for dep in ctx.attr.deps:
import_flags += dep.proto.import_flags
deps += dep.proto.deps
args = []
if ctx.attr.gen_cc:
args += ["--cpp_out=" + gen_dir]
if ctx.attr.gen_py:
args += ["--python_out=" + gen_dir]
inputs = srcs + deps
if ctx.executable.plugin:
plugin = ctx.executable.plugin
lang = ctx.attr.plugin_language
if not lang and plugin.basename.startswith("protoc-gen-"):
lang = plugin.basename[len("protoc-gen-"):]
if not lang:
fail("cannot infer the target language of plugin", "plugin_language")
outdir = gen_dir
if ctx.attr.plugin_options:
outdir = ",".join(ctx.attr.plugin_options) + ":" + outdir
args += ["--plugin=protoc-gen-%s=%s" % (lang, plugin.path)]
args += ["--%s_out=%s" % (lang, outdir)]
inputs += [plugin]
if args:
ctx.actions.run(
inputs = inputs,
outputs = ctx.outputs.outs,
arguments = args + import_flags + [s.path for s in srcs],
executable = ctx.executable.protoc,
mnemonic = "ProtoCompile",
use_default_shell_env = True,
)
return struct(
proto = struct(
srcs = srcs,
import_flags = import_flags,
deps = deps,
),
)
proto_gen = rule(
attrs = {
"srcs": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = ["proto"]),
"includes": attr.string_list(),
"protoc": attr.label(
cfg = "host",
executable = True,
allow_single_file = True,
mandatory = True,
),
"plugin": attr.label(
cfg = "host",
allow_files = True,
executable = True,
),
"plugin_language": attr.string(),
"plugin_options": attr.string_list(),
"gen_cc": attr.bool(),
"gen_py": attr.bool(),
"outs": attr.output_list(),
},
output_to_genfiles = True,
implementation = _proto_gen_impl,
)
"""Generates codes from Protocol Buffers definitions.
This rule helps you to implement Skylark macros specific to the target
language. You should prefer more specific `cc_proto_library `,
`py_proto_library` and others unless you are adding such wrapper macros.
Args:
srcs: Protocol Buffers definition files (.proto) to run the protocol compiler
against.
deps: a list of dependency labels; must be other proto libraries.
includes: a list of include paths to .proto files.
protoc: the label of the protocol compiler to generate the sources.
plugin: the label of the protocol compiler plugin to be passed to the protocol
compiler.
plugin_language: the language of the generated sources
plugin_options: a list of options to be passed to the plugin
gen_cc: generates C++ sources in addition to the ones from the plugin.
gen_py: generates Python sources in addition to the ones from the plugin.
outs: a list of labels of the expected outputs from the protocol compiler.
"""
def cc_proto_library(
name,
srcs = [],
deps = [],
cc_libs = [],
include = None,
protoc = "@com_google_protobuf//:protoc",
internal_bootstrap_hack = False,
use_grpc_plugin = False,
default_runtime = "@com_google_protobuf//:protobuf",
**kargs):
"""Bazel rule to create a C++ protobuf library from proto source files
NOTE: the rule is only an internal workaround to generate protos. The
interface may change and the rule may be removed when bazel has introduced
the native rule.
Args:
name: the name of the cc_proto_library.
srcs: the .proto files of the cc_proto_library.
deps: a list of dependency labels; must be cc_proto_library.
cc_libs: a list of other cc_library targets depended by the generated
cc_library.
include: a string indicating the include path of the .proto files.
protoc: the label of the protocol compiler to generate the sources.
      internal_bootstrap_hack: a flag indicating that the cc_proto_library is used only
        for bootstrapping. When it is set to True, no files will be generated.
The rule will simply be a provider for .proto files, so that other
cc_proto_library can depend on it.
use_grpc_plugin: a flag to indicate whether to call the grpc C++ plugin
when processing the proto files.
default_runtime: the implicitly default runtime which will be depended on by
the generated cc_library target.
**kargs: other keyword arguments that are passed to cc_library.
"""
includes = []
if include != None:
includes = [include]
if internal_bootstrap_hack:
# For pre-checked-in generated files, we add the internal_bootstrap_hack
# which will skip the codegen action.
proto_gen(
name = name + "_genproto",
srcs = srcs,
deps = [s + "_genproto" for s in deps],
includes = includes,
protoc = protoc,
visibility = ["//visibility:public"],
)
# An empty cc_library to make rule dependency consistent.
native.cc_library(
name = name,
**kargs
)
return
grpc_cpp_plugin = None
if use_grpc_plugin:
grpc_cpp_plugin = "//external:grpc_cpp_plugin"
gen_srcs = _CcSrcs(srcs, use_grpc_plugin)
gen_hdrs = _CcHdrs(srcs, use_grpc_plugin)
outs = gen_srcs + gen_hdrs
proto_gen(
name = name + "_genproto",
srcs = srcs,
deps = [s + "_genproto" for s in deps],
includes = includes,
protoc = protoc,
plugin = grpc_cpp_plugin,
plugin_language = "grpc",
gen_cc = 1,
outs = outs,
visibility = ["//visibility:public"],
)
if default_runtime and not default_runtime in cc_libs:
cc_libs = cc_libs + [default_runtime]
if use_grpc_plugin:
cc_libs = cc_libs + ["//external:grpc_lib"]
native.cc_library(
name = name,
srcs = gen_srcs,
hdrs = gen_hdrs,
deps = cc_libs + deps,
includes = includes,
alwayslink = 1,
**kargs
)
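# Hedged usage sketch (the target and .proto names below are invented for
# illustration): a BUILD file could declare a C++ proto library with the
# gRPC C++ plugin enabled.
#
# cc_proto_library(
#     name = "greeter_cc_proto",
#     srcs = ["greeter.proto"],
#     use_grpc_plugin = True,
# )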
def internal_gen_well_known_protos_java(srcs):
"""Bazel rule to generate the gen_well_known_protos_java genrule
Args:
srcs: the well known protos
"""
root = Label("%s//protobuf_java" % (REPOSITORY_NAME)).workspace_root
pkg = PACKAGE_NAME + "/" if PACKAGE_NAME else ""
if root == "":
include = " -I%ssrc " % pkg
else:
include = " -I%s/%ssrc " % (root, pkg)
native.genrule(
name = "gen_well_known_protos_java",
srcs = srcs,
outs = [
"wellknown.srcjar",
],
cmd = "$(location :protoc) --java_out=$(@D)/wellknown.jar" +
" %s $(SRCS) " % include +
" && mv $(@D)/wellknown.jar $(@D)/wellknown.srcjar",
tools = [":protoc"],
)
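# Illustrative call (an assumption; the real srcs label lives in the
# protobuf repository's BUILD files):
#
# internal_gen_well_known_protos_java(
#     srcs = ["//:well_known_protos"],
# )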
def internal_copied_filegroup(name, srcs, strip_prefix, dest, **kwargs):
"""Macro to copy files to a different directory and then create a filegroup.
This is used by the //:protobuf_python py_proto_library target to work around
an issue caused by Python source files that are part of the same Python
package being in separate directories.
Args:
srcs: The source files to copy and add to the filegroup.
strip_prefix: Path to the root of the files to copy.
dest: The directory to copy the source files into.
      **kwargs: extra arguments that will be passed to the filegroup.
"""
outs = [_RelativeOutputPath(s, strip_prefix, dest) for s in srcs]
native.genrule(
name = name + "_genrule",
srcs = srcs,
outs = outs,
cmd = " && ".join(
["cp $(location %s) $(location %s)" %
(s, _RelativeOutputPath(s, strip_prefix, dest)) for s in srcs],
),
)
native.filegroup(
name = name,
srcs = outs,
**kwargs
)
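# Sketch of the copy-then-filegroup workaround under assumed paths: with
# strip_prefix = "python", the file below is copied to
# "google/protobuf/__init__.py" and exposed through the filegroup.
#
# internal_copied_filegroup(
#     name = "python_srcs",
#     srcs = ["python/google/protobuf/__init__.py"],
#     strip_prefix = "python",
#     dest = "",
# )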
def py_proto_library(
name,
srcs = [],
deps = [],
py_libs = [],
py_extra_srcs = [],
include = None,
default_runtime = "@com_google_protobuf//:protobuf_python",
protoc = "@com_google_protobuf//:protoc",
use_grpc_plugin = False,
**kargs):
"""Bazel rule to create a Python protobuf library from proto source files
NOTE: the rule is only an internal workaround to generate protos. The
interface may change and the rule may be removed when bazel has introduced
the native rule.
Args:
name: the name of the py_proto_library.
srcs: the .proto files of the py_proto_library.
deps: a list of dependency labels; must be py_proto_library.
py_libs: a list of other py_library targets depended by the generated
py_library.
py_extra_srcs: extra source files that will be added to the output
py_library. This attribute is used for internal bootstrapping.
include: a string indicating the include path of the .proto files.
default_runtime: the implicitly default runtime which will be depended on by
the generated py_library target.
protoc: the label of the protocol compiler to generate the sources.
      use_grpc_plugin: a flag to indicate whether to call the grpc Python plugin
        when processing the proto files.
      **kargs: other keyword arguments that are passed to py_library.
"""
outs = _PyOuts(srcs, use_grpc_plugin)
includes = []
if include != None:
includes = [include]
grpc_python_plugin = None
if use_grpc_plugin:
grpc_python_plugin = "//external:grpc_python_plugin"
# Note: Generated grpc code depends on Python grpc module. This dependency
# is not explicitly listed in py_libs. Instead, host system is assumed to
# have grpc installed.
proto_gen(
name = name + "_genproto",
srcs = srcs,
deps = [s + "_genproto" for s in deps],
includes = includes,
protoc = protoc,
gen_py = 1,
outs = outs,
visibility = ["//visibility:public"],
plugin = grpc_python_plugin,
plugin_language = "grpc",
)
if default_runtime and not default_runtime in py_libs + deps:
py_libs = py_libs + [default_runtime]
native.py_library(
name = name,
srcs = outs + py_extra_srcs,
deps = py_libs + deps,
imports = includes,
**kargs
)
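# Hedged usage sketch (target and .proto names are assumptions): a Python
# proto library with gRPC stubs, yielding greeter_pb2.py and
# greeter_pb2_grpc.py via _PyOuts.
#
# py_proto_library(
#     name = "greeter_py_proto",
#     srcs = ["greeter.proto"],
#     use_grpc_plugin = True,
# )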
def internal_protobuf_py_tests(
name,
modules = [],
**kargs):
"""Bazel rules to create batch tests for protobuf internal.
Args:
name: the name of the rule.
      modules: a list of modules for tests. The macro will create a py_test for
        each entry, with the source "python/google/protobuf/internal/%s.py"
kargs: extra parameters that will be passed into the py_test.
"""
for m in modules:
s = "python/google/protobuf/internal/%s.py" % m
native.py_test(
name = "py_%s" % m,
srcs = [s],
main = s,
**kargs
)
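# Example with a hypothetical module list: this would create the py_test
# targets "py_descriptor_test" and "py_message_test" from
# python/google/protobuf/internal/.
#
# internal_protobuf_py_tests(
#     name = "python_tests_batch",
#     modules = ["descriptor_test", "message_test"],
# )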
def check_protobuf_required_bazel_version():
"""For WORKSPACE files, to check the installed version of bazel.
This ensures bazel supports our approach to proto_library() depending on a
copied filegroup. (Fixed in bazel 0.5.4)
"""
expected = apple_common.dotted_version("0.5.4")
current = apple_common.dotted_version(native.bazel_version)
if current.compare_to(expected) < 0:
fail("Bazel must be newer than 0.5.4")
| 33.95082
| 92
| 0.618266
|
def _GetPath(ctx, path):
if ctx.label.workspace_root:
return ctx.label.workspace_root + "/" + path
else:
return path
def _IsNewExternal(ctx):
return ctx.label.workspace_root.startswith("../")
def _GenDir(ctx):
if _IsNewExternal(ctx):
return ctx.genfiles_dir.path + (
"/" + ctx.attr.includes[0] if ctx.attr.includes and ctx.attr.includes[0] else ""
)
# Either way, appending the source path to the genfiles dir works.
return ctx.var["GENDIR"] + "/" + _SourceDir(ctx)
def _SourceDir(ctx):
if not ctx.attr.includes:
return ctx.label.workspace_root
if not ctx.attr.includes[0]:
return _GetPath(ctx, ctx.label.package)
if not ctx.label.package:
return _GetPath(ctx, ctx.attr.includes[0])
return _GetPath(ctx, ctx.label.package + "/" + ctx.attr.includes[0])
def _CcHdrs(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + ".pb.h" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + ".grpc.pb.h" for s in srcs]
return ret
def _CcSrcs(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + ".pb.cc" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + ".grpc.pb.cc" for s in srcs]
return ret
def _CcOuts(srcs, use_grpc_plugin = False):
return _CcHdrs(srcs, use_grpc_plugin) + _CcSrcs(srcs, use_grpc_plugin)
def _PyOuts(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + "_pb2.py" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + "_pb2_grpc.py" for s in srcs]
return ret
def _RelativeOutputPath(path, include, dest = ""):
if include == None:
return path
if not path.startswith(include):
fail("Include path %s isn't part of the path %s." % (include, path))
if include and include[-1] != "/":
include = include + "/"
if dest and dest[-1] != "/":
dest = dest + "/"
path = path[len(include):]
return dest + path
def _proto_gen_impl(ctx):
srcs = ctx.files.srcs
deps = []
deps += ctx.files.srcs
source_dir = _SourceDir(ctx)
gen_dir = _GenDir(ctx)
if source_dir:
import_flags = ["-I" + source_dir, "-I" + gen_dir]
else:
import_flags = ["-I."]
for dep in ctx.attr.deps:
import_flags += dep.proto.import_flags
deps += dep.proto.deps
args = []
if ctx.attr.gen_cc:
args += ["--cpp_out=" + gen_dir]
if ctx.attr.gen_py:
args += ["--python_out=" + gen_dir]
inputs = srcs + deps
if ctx.executable.plugin:
plugin = ctx.executable.plugin
lang = ctx.attr.plugin_language
if not lang and plugin.basename.startswith("protoc-gen-"):
lang = plugin.basename[len("protoc-gen-"):]
if not lang:
fail("cannot infer the target language of plugin", "plugin_language")
outdir = gen_dir
if ctx.attr.plugin_options:
outdir = ",".join(ctx.attr.plugin_options) + ":" + outdir
args += ["--plugin=protoc-gen-%s=%s" % (lang, plugin.path)]
args += ["--%s_out=%s" % (lang, outdir)]
inputs += [plugin]
if args:
ctx.actions.run(
inputs = inputs,
outputs = ctx.outputs.outs,
arguments = args + import_flags + [s.path for s in srcs],
executable = ctx.executable.protoc,
mnemonic = "ProtoCompile",
use_default_shell_env = True,
)
return struct(
proto = struct(
srcs = srcs,
import_flags = import_flags,
deps = deps,
),
)
proto_gen = rule(
attrs = {
"srcs": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = ["proto"]),
"includes": attr.string_list(),
"protoc": attr.label(
cfg = "host",
executable = True,
allow_single_file = True,
mandatory = True,
),
"plugin": attr.label(
cfg = "host",
allow_files = True,
executable = True,
),
"plugin_language": attr.string(),
"plugin_options": attr.string_list(),
"gen_cc": attr.bool(),
"gen_py": attr.bool(),
"outs": attr.output_list(),
},
output_to_genfiles = True,
implementation = _proto_gen_impl,
)
def cc_proto_library(
name,
srcs = [],
deps = [],
cc_libs = [],
include = None,
protoc = "@com_google_protobuf//:protoc",
internal_bootstrap_hack = False,
use_grpc_plugin = False,
default_runtime = "@com_google_protobuf//:protobuf",
**kargs):
includes = []
if include != None:
includes = [include]
if internal_bootstrap_hack:
proto_gen(
name = name + "_genproto",
srcs = srcs,
deps = [s + "_genproto" for s in deps],
includes = includes,
protoc = protoc,
visibility = ["//visibility:public"],
)
native.cc_library(
name = name,
**kargs
)
return
grpc_cpp_plugin = None
if use_grpc_plugin:
grpc_cpp_plugin = "//external:grpc_cpp_plugin"
gen_srcs = _CcSrcs(srcs, use_grpc_plugin)
gen_hdrs = _CcHdrs(srcs, use_grpc_plugin)
outs = gen_srcs + gen_hdrs
proto_gen(
name = name + "_genproto",
srcs = srcs,
deps = [s + "_genproto" for s in deps],
includes = includes,
protoc = protoc,
plugin = grpc_cpp_plugin,
plugin_language = "grpc",
gen_cc = 1,
outs = outs,
visibility = ["//visibility:public"],
)
if default_runtime and not default_runtime in cc_libs:
cc_libs = cc_libs + [default_runtime]
if use_grpc_plugin:
cc_libs = cc_libs + ["//external:grpc_lib"]
native.cc_library(
name = name,
srcs = gen_srcs,
hdrs = gen_hdrs,
deps = cc_libs + deps,
includes = includes,
alwayslink = 1,
**kargs
)
def internal_gen_well_known_protos_java(srcs):
root = Label("%s//protobuf_java" % (REPOSITORY_NAME)).workspace_root
pkg = PACKAGE_NAME + "/" if PACKAGE_NAME else ""
if root == "":
include = " -I%ssrc " % pkg
else:
include = " -I%s/%ssrc " % (root, pkg)
native.genrule(
name = "gen_well_known_protos_java",
srcs = srcs,
outs = [
"wellknown.srcjar",
],
cmd = "$(location :protoc) --java_out=$(@D)/wellknown.jar" +
" %s $(SRCS) " % include +
" && mv $(@D)/wellknown.jar $(@D)/wellknown.srcjar",
tools = [":protoc"],
)
def internal_copied_filegroup(name, srcs, strip_prefix, dest, **kwargs):
outs = [_RelativeOutputPath(s, strip_prefix, dest) for s in srcs]
native.genrule(
name = name + "_genrule",
srcs = srcs,
outs = outs,
cmd = " && ".join(
["cp $(location %s) $(location %s)" %
(s, _RelativeOutputPath(s, strip_prefix, dest)) for s in srcs],
),
)
native.filegroup(
name = name,
srcs = outs,
**kwargs
)
def py_proto_library(
name,
srcs = [],
deps = [],
py_libs = [],
py_extra_srcs = [],
include = None,
default_runtime = "@com_google_protobuf//:protobuf_python",
protoc = "@com_google_protobuf//:protoc",
use_grpc_plugin = False,
**kargs):
outs = _PyOuts(srcs, use_grpc_plugin)
includes = []
if include != None:
includes = [include]
grpc_python_plugin = None
if use_grpc_plugin:
grpc_python_plugin = "//external:grpc_python_plugin"
proto_gen(
name = name + "_genproto",
srcs = srcs,
deps = [s + "_genproto" for s in deps],
includes = includes,
protoc = protoc,
gen_py = 1,
outs = outs,
visibility = ["//visibility:public"],
plugin = grpc_python_plugin,
plugin_language = "grpc",
)
if default_runtime and not default_runtime in py_libs + deps:
py_libs = py_libs + [default_runtime]
native.py_library(
name = name,
srcs = outs + py_extra_srcs,
deps = py_libs + deps,
imports = includes,
**kargs
)
def internal_protobuf_py_tests(
name,
modules = [],
**kargs):
for m in modules:
s = "python/google/protobuf/internal/%s.py" % m
native.py_test(
name = "py_%s" % m,
srcs = [s],
main = s,
**kargs
)
def check_protobuf_required_bazel_version():
expected = apple_common.dotted_version("0.5.4")
current = apple_common.dotted_version(native.bazel_version)
if current.compare_to(expected) < 0:
fail("Bazel must be newer than 0.5.4")
| true
| true
|
1c415ce4cd5e6ed0e7a362c262c6aebfe3a7ea7b
| 4,153
|
py
|
Python
|
func_TDX.py
|
nanomiter/stock-analysis
|
3b2abd48e6419733c0508f7a974c559d734d31e8
|
[
"Apache-2.0"
] | 25
|
2021-01-27T14:45:11.000Z
|
2022-03-30T06:10:42.000Z
|
func_TDX.py
|
nanomiter/stock-analysis
|
3b2abd48e6419733c0508f7a974c559d734d31e8
|
[
"Apache-2.0"
] | 3
|
2021-05-17T14:12:55.000Z
|
2021-05-18T08:43:40.000Z
|
func_TDX.py
|
wkingnet/stock-analysis
|
74dc99a3acd871b558607078a1ee7787a73a9c85
|
[
"Apache-2.0"
] | 16
|
2021-03-26T03:51:43.000Z
|
2022-03-28T03:05:19.000Z
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
模仿通达信语句的函数库,如MA(C,5) REF(C,1)等样式。函数简单,只为了和通达信公式看起来一致,方便排查。
传入类型必须是pandas Series类型。
传出类型:只有MA输出具体数值,其他所有函数传出仍然是Series类型
作者:wking [http://wkings.net]
"""
import pandas as pd
def rolling_window(a, window):
"""
copy from http://stackoverflow.com/questions/6811183/rolling-window-for-1d-arrays-in-numpy
    sliding_window_view is only available in numpy >= 1.20
"""
from numpy.lib.stride_tricks import sliding_window_view
return sliding_window_view(a, window_shape=window)
def REF(value, day):
"""
    Reference the value from a number of periods ago. A list input returns a concrete value; a Series input returns a Series.
"""
if 'list' in str(type(value)):
result = value[~day]
elif 'series' in str(type(value)):
result = value.shift(periods=day)
return result
def MA(value, day) -> float:
"""
    Return the simple moving average of the current period. Input may be a list or a Series; output is the concrete SMA value for the current period.
:rtype: float
"""
import talib
# result = statistics.mean(value[-day:])
result = talib.SMA(value, day).iat[-1]
return result
def SMA(value, day):
"""
    Return the simple moving average Series. Input may be a list or a Series; output is the SMA Series from the start of the history to the current period.
"""
import talib
# result = statistics.mean(value[-day:])
result = talib.SMA(value, day)
return result
def HHV(series, day):
"""
    Return the highest value over the window (a running maximum when day == 0).
"""
# value = max(series[-day:])
if day == 0:
value = pd.Series(index=series.index, dtype=float)
tmp = series.iat[0]
value.iat[0] = tmp
for i in range(series.shape[0]):
if tmp < series.iat[i]:
tmp = series.iat[i]
value.iat[i] = tmp
        value = value.fillna(method='ffill')  # forward-fill the invalid values
else:
value = series.rolling(day).max()
value.iloc[0:day-1] = HHV(series.iloc[0:day-1], 0)
return value
def LLV(series, day):
"""
    Return the lowest value over the window (a running minimum when day == 0).
"""
# value = min(value[-day:])
if day == 0:
value = pd.Series(index=series.index, dtype=float)
tmp = series.iat[0]
value.iat[0] = tmp
for i in range(series.shape[0]):
if tmp > series.iat[i]:
tmp = series.iat[i]
value.iat[i] = tmp
        value = value.fillna(method='ffill')  # forward-fill the invalid values
else:
value = series.rolling(day).min()
value.iloc[0:day - 1] = LLV(series.iloc[0:day - 1], 0)
return value
def COUNT(series, n):
    # The rolling approach is simple and clear but performs far too poorly:
# result = series.rolling(n) \
# .apply(lambda x: x.value_counts().to_dict()[True] if True in x.value_counts().to_dict() else 0)
df = series.to_frame('cond')
df.insert(df.shape[1], 'result', 0)
for index_true in df.loc[df['cond'] == True].index.to_list():
index_int = df.index.get_loc(index_true)
column_int = df.columns.get_loc('result')
df.iloc[index_int:index_int + n, column_int] = df.iloc[index_int:index_int + n, column_int] + 1
result = df['result']
return result
def EXIST(cond, n):
series = cond[-n:]
if True in series.to_list():
return True
else:
return False
def CROSS(s1, s2):
cond1 = s1 > s2
cond2 = s1.shift() <= s2.shift()
result = cond1 & cond2
return result
def BARSLAST(series):
    # Number of periods since the condition last held.
    # Usage:
    # BARSLAST(X): number of periods since X was last non-zero
    # Example:
    # BARSLAST(CLOSE/REF(CLOSE,1)>=1.1) gives the number of periods since the last limit-up bar
result = pd.Series(index=series.index, dtype=int)
i = 0
for k, v in series.iteritems():
if v:
i = 0
result[k] = i
else:
i = i + 1
result[k] = i
return result
def BARSLASTCOUNT(cond):
    # Count the number of consecutive periods satisfying the condition.
    # Usage:
    # BARSLASTCOUNT(X): count the consecutive periods for which X holds.
    # Example:
    # BARSLASTCOUNT(CLOSE>OPEN) counts the consecutive periods that closed up
result = pd.Series(index=cond.index, dtype=int)
i = 0
for k, v in cond.iteritems():
if v:
i = i + 1
result[k] = i
else:
i = 0
result[k] = i
return result
def VALUEWHEN(cond, value_series):
result = pd.Series(index=cond.index, dtype=float)
result.loc[cond.loc[cond==True].keys()] = value_series.loc[cond.loc[cond==True].keys()]
    result = result.fillna(method='ffill')  # forward-fill the invalid values
return result
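if __name__ == '__main__':
    # Minimal usage sketch, not part of the original library: exercise a few
    # of the TDX-style helpers on a toy price Series. MA/SMA additionally
    # require the TA-Lib package, so they are left out here.
    s = pd.Series([1.0, 2.0, 3.0, 2.0, 5.0])
    print(REF(s, 1).to_list())    # values shifted back by one period
    print(HHV(s, 3).to_list())    # 3-period rolling high
    print(CROSS(s, s.shift()).to_list())  # True where s crosses above its lag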
| 25.323171
| 105
| 0.58223
|
import pandas as pd
def rolling_window(a, window):
from numpy.lib.stride_tricks import sliding_window_view
return sliding_window_view(a, window_shape=window)
def REF(value, day):
if 'list' in str(type(value)):
result = value[~day]
elif 'series' in str(type(value)):
result = value.shift(periods=day)
return result
def MA(value, day) -> float:
import talib
result = talib.SMA(value, day).iat[-1]
return result
def SMA(value, day):
import talib
result = talib.SMA(value, day)
return result
def HHV(series, day):
if day == 0:
value = pd.Series(index=series.index, dtype=float)
tmp = series.iat[0]
value.iat[0] = tmp
for i in range(series.shape[0]):
if tmp < series.iat[i]:
tmp = series.iat[i]
value.iat[i] = tmp
value = value.fillna(method='ffill')
else:
value = series.rolling(day).max()
value.iloc[0:day-1] = HHV(series.iloc[0:day-1], 0)
return value
def LLV(series, day):
if day == 0:
value = pd.Series(index=series.index, dtype=float)
tmp = series.iat[0]
value.iat[0] = tmp
for i in range(series.shape[0]):
if tmp > series.iat[i]:
tmp = series.iat[i]
value.iat[i] = tmp
value = value.fillna(method='ffill')
else:
value = series.rolling(day).min()
value.iloc[0:day - 1] = LLV(series.iloc[0:day - 1], 0)
return value
def COUNT(series, n):
df = series.to_frame('cond')
df.insert(df.shape[1], 'result', 0)
for index_true in df.loc[df['cond'] == True].index.to_list():
index_int = df.index.get_loc(index_true)
column_int = df.columns.get_loc('result')
df.iloc[index_int:index_int + n, column_int] = df.iloc[index_int:index_int + n, column_int] + 1
result = df['result']
return result
def EXIST(cond, n):
series = cond[-n:]
if True in series.to_list():
return True
else:
return False
def CROSS(s1, s2):
cond1 = s1 > s2
cond2 = s1.shift() <= s2.shift()
result = cond1 & cond2
return result
def BARSLAST(series):
result = pd.Series(index=series.index, dtype=int)
i = 0
for k, v in series.iteritems():
if v:
i = 0
result[k] = i
else:
i = i + 1
result[k] = i
return result
def BARSLASTCOUNT(cond):
result = pd.Series(index=cond.index, dtype=int)
i = 0
for k, v in cond.iteritems():
if v:
i = i + 1
result[k] = i
else:
i = 0
result[k] = i
return result
def VALUEWHEN(cond, value_series):
result = pd.Series(index=cond.index, dtype=float)
result.loc[cond.loc[cond==True].keys()] = value_series.loc[cond.loc[cond==True].keys()]
result = result.fillna(method='ffill')
return result
| true
| true
|
1c415d07096084a318cac7610f73c87dd5816ce6
| 2,290
|
py
|
Python
|
dfvfs/encryption/manager.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 176
|
2015-01-02T13:55:39.000Z
|
2022-03-12T11:44:37.000Z
|
dfvfs/encryption/manager.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 495
|
2015-01-13T06:47:06.000Z
|
2022-03-12T11:07:03.000Z
|
dfvfs/encryption/manager.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 62
|
2015-02-23T08:19:38.000Z
|
2022-03-18T06:01:22.000Z
|
# -*- coding: utf-8 -*-
"""The encryption manager."""
class EncryptionManager(object):
"""Encryption manager."""
_decrypters = {}
@classmethod
def DeregisterDecrypter(cls, decrypter):
"""Deregisters a decrypter for a specific encryption method.
Args:
decrypter (type): decrypter class.
Raises:
KeyError: if the corresponding decrypter is not set.
"""
encryption_method = decrypter.ENCRYPTION_METHOD.lower()
if encryption_method not in cls._decrypters:
raise KeyError(
'Decrypter for encryption method: {0:s} not set.'.format(
decrypter.ENCRYPTION_METHOD))
del cls._decrypters[encryption_method]
@classmethod
def GetDecrypter(cls, encryption_method, **kwargs):
"""Retrieves the decrypter object for a specific encryption method.
Args:
encryption_method (str): encryption method identifier.
kwargs (dict): keyword arguments depending on the decrypter.
Returns:
      Decrypter: decrypter or None if the encryption method does not exist.
Raises:
CredentialError: if the necessary credentials are missing.
"""
encryption_method = encryption_method.lower()
decrypter = cls._decrypters.get(encryption_method, None)
if not decrypter:
return None
return decrypter(**kwargs)
@classmethod
def RegisterDecrypter(cls, decrypter):
"""Registers a decrypter for a specific encryption method.
Args:
decrypter (type): decrypter class.
Raises:
KeyError: if the corresponding decrypter is already set.
"""
encryption_method = decrypter.ENCRYPTION_METHOD.lower()
if encryption_method in cls._decrypters:
raise KeyError(
'Decrypter for encryption method: {0:s} already set.'.format(
decrypter.ENCRYPTION_METHOD))
cls._decrypters[encryption_method] = decrypter
@classmethod
def RegisterDecrypters(cls, decrypters):
"""Registers decrypters.
The decrypters are identified based on their lower case encryption method.
Args:
decrypters (list[type]): decrypter classes.
Raises:
      KeyError: if a decrypter is already set for the corresponding
          encryption method.
"""
for decrypter in decrypters:
cls.RegisterDecrypter(decrypter)
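# A hedged usage sketch, not part of dfvfs: a hypothetical decrypter class
# is registered under its lower-cased ENCRYPTION_METHOD, after which callers
# obtain configured instances via GetDecrypter. XorDecrypter and its key
# argument are invented purely to illustrate the registry contract.
#
# class XorDecrypter(object):
#   ENCRYPTION_METHOD = 'xor'
#
#   def __init__(self, key=None):
#     self._key = key
#
# EncryptionManager.RegisterDecrypter(XorDecrypter)
# decrypter = EncryptionManager.GetDecrypter('XOR', key=b'\x01')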
| 27.926829
| 78
| 0.695197
|
class EncryptionManager(object):
_decrypters = {}
@classmethod
def DeregisterDecrypter(cls, decrypter):
encryption_method = decrypter.ENCRYPTION_METHOD.lower()
if encryption_method not in cls._decrypters:
raise KeyError(
'Decrypter for encryption method: {0:s} not set.'.format(
decrypter.ENCRYPTION_METHOD))
del cls._decrypters[encryption_method]
@classmethod
def GetDecrypter(cls, encryption_method, **kwargs):
encryption_method = encryption_method.lower()
decrypter = cls._decrypters.get(encryption_method, None)
if not decrypter:
return None
return decrypter(**kwargs)
@classmethod
def RegisterDecrypter(cls, decrypter):
encryption_method = decrypter.ENCRYPTION_METHOD.lower()
if encryption_method in cls._decrypters:
raise KeyError(
'Decrypter for encryption method: {0:s} already set.'.format(
decrypter.ENCRYPTION_METHOD))
cls._decrypters[encryption_method] = decrypter
@classmethod
def RegisterDecrypters(cls, decrypters):
for decrypter in decrypters:
cls.RegisterDecrypter(decrypter)
| true
| true
|
1c415de3cd60b3921a89fd82651df715b22e74fa
| 1,029
|
py
|
Python
|
tests/test_math.py
|
ssube/redesigned-barnacle
|
314ea415b6f725c798cc97d6e619fbedc7f8bd21
|
[
"MIT"
] | null | null | null |
tests/test_math.py
|
ssube/redesigned-barnacle
|
314ea415b6f725c798cc97d6e619fbedc7f8bd21
|
[
"MIT"
] | 1
|
2021-11-04T16:00:15.000Z
|
2021-11-04T16:00:15.000Z
|
tests/test_math.py
|
ssube/redesigned-barnacle
|
314ea415b6f725c798cc97d6e619fbedc7f8bd21
|
[
"MIT"
] | null | null | null |
from redesigned_barnacle.math import scale, temp_ftoc
from unittest import TestCase
class TempFToCTest(TestCase):
def test_convert(self):
self.assertAlmostEqual(temp_ftoc(-50), -45.56, 2)
self.assertAlmostEqual(temp_ftoc(-40), -40.00, 2)
self.assertAlmostEqual(temp_ftoc(-30), -34.44, 2)
self.assertAlmostEqual(temp_ftoc(-20), -28.89, 2)
self.assertAlmostEqual(temp_ftoc(-10), -23.33, 2)
self.assertAlmostEqual(temp_ftoc(0), -17.78, 2)
self.assertAlmostEqual(temp_ftoc(10), -12.22, 2)
self.assertAlmostEqual(temp_ftoc(20), -6.67, 2)
self.assertAlmostEqual(temp_ftoc(32), 0, 2)
class ScaleTest(TestCase):
def test_scale(self):
self.assertEqual(scale(0, 0, 10), 0.0)
self.assertEqual(scale(5, 0, 10), 0.5)
self.assertEqual(scale(10, 0, 10), 1.0)
self.assertEqual(scale(15, 0, 10), 1.0)
def test_clamp(self):
self.assertEqual(scale(-10, 0, 1), 0)
self.assertEqual(scale(+10, 0, 1), 1)
| 38.111111
| 57
| 0.638484
|
from redesigned_barnacle.math import scale, temp_ftoc
from unittest import TestCase
class TempFToCTest(TestCase):
def test_convert(self):
self.assertAlmostEqual(temp_ftoc(-50), -45.56, 2)
self.assertAlmostEqual(temp_ftoc(-40), -40.00, 2)
self.assertAlmostEqual(temp_ftoc(-30), -34.44, 2)
self.assertAlmostEqual(temp_ftoc(-20), -28.89, 2)
self.assertAlmostEqual(temp_ftoc(-10), -23.33, 2)
self.assertAlmostEqual(temp_ftoc(0), -17.78, 2)
self.assertAlmostEqual(temp_ftoc(10), -12.22, 2)
self.assertAlmostEqual(temp_ftoc(20), -6.67, 2)
self.assertAlmostEqual(temp_ftoc(32), 0, 2)
class ScaleTest(TestCase):
def test_scale(self):
self.assertEqual(scale(0, 0, 10), 0.0)
self.assertEqual(scale(5, 0, 10), 0.5)
self.assertEqual(scale(10, 0, 10), 1.0)
self.assertEqual(scale(15, 0, 10), 1.0)
def test_clamp(self):
self.assertEqual(scale(-10, 0, 1), 0)
self.assertEqual(scale(+10, 0, 1), 1)
| true
| true
|
1c415e73bea6d4794cd0d3190380c24ee48d1f1b
| 159
|
py
|
Python
|
Scripts/django-admin.py
|
brandongallagher1999/Django_CRUD
|
e7da7c7c63f26e59804a578f1ba5d9562753805f
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/django-admin.py
|
brandongallagher1999/Django_CRUD
|
e7da7c7c63f26e59804a578f1ba5d9562753805f
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/django-admin.py
|
brandongallagher1999/Django_CRUD
|
e7da7c7c63f26e59804a578f1ba5d9562753805f
|
[
"bzip2-1.0.6"
] | null | null | null |
#!c:\users\crypt\desktop\hatchw~1\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 26.5
| 52
| 0.779874
|
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| true
| true
|
1c416042ef8064181370445b109ff0d2c35f5a88
| 3,704
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
BramandUn/PALLY1
|
036c473dddc7534c0979b159ca458e8a3a8a10cb
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
BramandUn/PALLY1
|
036c473dddc7534c0979b159ca458e8a3a8a10cb
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
BramandUn/PALLY1
|
036c473dddc7534c0979b159ca458e8a3a8a10cb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00PALLY1user:\x00Documents:\x00PALLY1:\x00PALLY1:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/PALLY1user/Documents/PALLY1/PALLY1/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['PALLY1-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.733333
| 1,817
| 0.723812
|
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00PALLY1user:\x00Documents:\x00PALLY1:\x00PALLY1:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/PALLY1user/Documents/PALLY1/PALLY1/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['PALLY1-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| true
| true
|
1c41610b8152ea716012d657c593c398587210ff
| 1,934
|
py
|
Python
|
app/modules/user.py
|
hboueix/PyCheckers
|
c1339a004f30f76a33461b52f9633bbbd1204bb0
|
[
"MIT"
] | null | null | null |
app/modules/user.py
|
hboueix/PyCheckers
|
c1339a004f30f76a33461b52f9633bbbd1204bb0
|
[
"MIT"
] | null | null | null |
app/modules/user.py
|
hboueix/PyCheckers
|
c1339a004f30f76a33461b52f9633bbbd1204bb0
|
[
"MIT"
] | null | null | null |
import sys
import socket
import errno
HEADER_LENGTH = 10
IP = "hboueix.fr"
PORT = 63000
class User:
def __init__(self, username):
self.username = username
self.client_socket = self.get_client_socket()
self.recv_text = ''
def get_client_socket(self):
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((IP, PORT))
client_socket.setblocking(False)
username = self.username.encode('utf-8')
username_header = f"{len(username):<{HEADER_LENGTH}}".encode("utf-8")
client_socket.send(username_header + username)
return client_socket
def run_connection(self, text):
#message = input(f"{self.username}> ")
message = text
if message:
message = message.encode("utf-8")
message_header = f"{len(message):<{HEADER_LENGTH}}".encode("utf-8")
self.client_socket.send(message_header + message)
try:
while True:
username_header = self.client_socket.recv(HEADER_LENGTH)
if not len(username_header):
print("Connection fermée par le serveur")
sys.exit()
username_length = int(username_header.decode("utf-8"))
username = self.client_socket.recv(username_length).decode("utf-8")
message_header = self.client_socket.recv(HEADER_LENGTH)
message_length = int(message_header.decode("utf-8"))
message = self.client_socket.recv(message_length).decode("utf-8")
self.recv_text = f"{username}> {message}"
except IOError as e:
if e.errno != errno.EAGAIN or e.errno != errno.EWOULDBLOCK:
print('Reading error', str(e))
sys.exit()
except Exception as e:
print('General error', str(e))
sys.exit()
| 32.233333
| 83
| 0.59514
|
import sys
import socket
import errno
HEADER_LENGTH = 10
IP = "hboueix.fr"
PORT = 63000
class User:
def __init__(self, username):
self.username = username
self.client_socket = self.get_client_socket()
self.recv_text = ''
def get_client_socket(self):
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((IP, PORT))
client_socket.setblocking(False)
username = self.username.encode('utf-8')
username_header = f"{len(username):<{HEADER_LENGTH}}".encode("utf-8")
client_socket.send(username_header + username)
return client_socket
def run_connection(self, text):
message = text
if message:
message = message.encode("utf-8")
message_header = f"{len(message):<{HEADER_LENGTH}}".encode("utf-8")
self.client_socket.send(message_header + message)
try:
while True:
username_header = self.client_socket.recv(HEADER_LENGTH)
if not len(username_header):
print("Connection fermée par le serveur")
sys.exit()
username_length = int(username_header.decode("utf-8"))
username = self.client_socket.recv(username_length).decode("utf-8")
message_header = self.client_socket.recv(HEADER_LENGTH)
message_length = int(message_header.decode("utf-8"))
message = self.client_socket.recv(message_length).decode("utf-8")
self.recv_text = f"{username}> {message}"
except IOError as e:
            if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
print('Reading error', str(e))
sys.exit()
except Exception as e:
print('General error', str(e))
sys.exit()
| true
| true
|
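The client above frames every message with a fixed 10-byte ASCII header carrying the payload length, left-justified. A self-contained sketch of the same framing (the helper names are illustrative, not from this repo); unlike the client, the receive side also loops until the whole payload has arrived:

import socket

HEADER_LENGTH = 10  # same fixed-width header as the client above

def send_frame(sock, payload):
    # Prefix the payload with its length, left-justified in 10 ASCII bytes.
    header = f"{len(payload):<{HEADER_LENGTH}}".encode("utf-8")
    sock.sendall(header + payload)

def recv_frame(sock):
    # Read the fixed-size header, then loop until the full payload arrives.
    header = sock.recv(HEADER_LENGTH)
    if not header:
        raise ConnectionError("peer closed the connection")
    length = int(header.decode("utf-8"))
    chunks, remaining = [], length
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            raise ConnectionError("peer closed mid-frame")
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)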
1c41615ee564edd382539594e6811f0465ddaaf7
| 2,389
|
py
|
Python
|
HouseSpider/HouseSpider/middlewares.py
|
wangzihan424/HouseSpider
|
a3592d4fe4e8bc04a3972dabdbb1edeca3fee036
|
[
"MIT"
] | null | null | null |
HouseSpider/HouseSpider/middlewares.py
|
wangzihan424/HouseSpider
|
a3592d4fe4e8bc04a3972dabdbb1edeca3fee036
|
[
"MIT"
] | null | null | null |
HouseSpider/HouseSpider/middlewares.py
|
wangzihan424/HouseSpider
|
a3592d4fe4e8bc04a3972dabdbb1edeca3fee036
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class HousespiderSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
from fake_useragent import UserAgent
class RandomUserAgentMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
o = cls()
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def __init__(self):
ua = UserAgent()
self.ua = ua
def process_request(self, request, spider):
request.headers.setdefault(b'User-Agent',self.ua.random)
def spider_opened(self, spider):
pass
| 31.853333
| 78
| 0.672666
|
from scrapy import signals
class HousespiderSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
from fake_useragent import UserAgent
class RandomUserAgentMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
o = cls()
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def __init__(self):
ua = UserAgent()
self.ua = ua
def process_request(self, request, spider):
request.headers.setdefault(b'User-Agent',self.ua.random)
def spider_opened(self, spider):
pass
| true
| true
|
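Neither middleware above does anything until it is registered in the project settings: the generated spider middleware belongs in SPIDER_MIDDLEWARES, while RandomUserAgentMiddleware implements process_request and therefore goes in DOWNLOADER_MIDDLEWARES. A plausible settings.py fragment, with arbitrary priorities and the module path assumed from the repository layout:

# settings.py (sketch)
SPIDER_MIDDLEWARES = {
    'HouseSpider.middlewares.HousespiderSpiderMiddleware': 543,
}

DOWNLOADER_MIDDLEWARES = {
    # Disable Scrapy's built-in user agent middleware so the random one wins.
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'HouseSpider.middlewares.RandomUserAgentMiddleware': 400,
}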
1c4161c3a422be6f32faf81d7ea781eb278f2d1c
| 38
|
py
|
Python
|
aws_toolbox/__init__.py
|
miniche/aws-toolbox
|
8d334ca1f3eec449db258843cf3e396fe7879484
|
[
"MIT"
] | null | null | null |
aws_toolbox/__init__.py
|
miniche/aws-toolbox
|
8d334ca1f3eec449db258843cf3e396fe7879484
|
[
"MIT"
] | null | null | null |
aws_toolbox/__init__.py
|
miniche/aws-toolbox
|
8d334ca1f3eec449db258843cf3e396fe7879484
|
[
"MIT"
] | null | null | null |
__author__ = 'Charles-Emmanuel CAMUS'
| 19
| 37
| 0.789474
|
__author__ = 'Charles-Emmanuel CAMUS'
| true
| true
|
1c41630a00c44705730ff4723c8867290a108324
| 6,236
|
py
|
Python
|
meme_clusters2sqlite.py
|
musically-ut/datasets2sqlite
|
b9f40cf763b632343e1f35011826239952d9441c
|
[
"MIT"
] | 1
|
2015-10-25T21:23:23.000Z
|
2015-10-25T21:23:23.000Z
|
meme_clusters2sqlite.py
|
musically-ut/datasets2sqlite
|
b9f40cf763b632343e1f35011826239952d9441c
|
[
"MIT"
] | null | null | null |
meme_clusters2sqlite.py
|
musically-ut/datasets2sqlite
|
b9f40cf763b632343e1f35011826239952d9441c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import sqlite3
import gzip
import bz2
import sys
import argparse
import datetime as D
def logTime(chkpoint):
print('*** Checkpoint: {} at \x1b[31m{}\x1b[0m'.format(chkpoint, D.datetime.now()))
sys.stdout.flush()
argParser = argparse.ArgumentParser()
argParser.add_argument('clusterFile',
help='The file to read clusters from.')
argParser.add_argument('sqlitedb',
help='The sqlite table to fill.')
argParser.add_argument('table_prefix',
help='The prefix of table names in SQLite.')
group = argParser.add_mutually_exclusive_group()
group.add_argument('--gzip',
help='Assume file uses the gzip compression.',
action='store_true')
group.add_argument('--bz2',
help='Assume file uses bz2 compression.',
action='store_true')
args = argParser.parse_args()
if args.bz2:
inputFile = bz2.BZ2File(args.clusterFile, 'rU')
elif args.gzip:
inputFile = gzip.open(args.clusterFile, 'rU')
else:
inputFile = open(args.clusterFile, 'rU')
conn = sqlite3.connect(args.sqlitedb)
# Always return bytestring instead of unicode.
conn.text_factory = str
cur = conn.cursor()
def blockReader(inputFile):
'''Read one cluster block from the passed file.'''
line = 0
A_line, B_line, C_line = '', '', ''
while True:
try:
block = {}
line += 1
A_line = inputFile.readline()
if A_line == '':
break
A_data = A_line.split('\t')
B_count = int(A_data[0])
block['cluster_size'] = B_count
block['total_frequency'] = int(A_data[1])
block['root'] = A_data[2]
block['cluster_id'] = int(A_data[3])
block['B'] = []
for b_cluster_num in range(B_count):
block['B'].append({})
B = block['B'][b_cluster_num]
line += 1
B_line = inputFile.readline().strip()
B_data = B_line.split('\t')
B['total_phrase_frequency'] = int(B_data[0])
C_count = int(B_data[1])
B['num_urls'] = C_count
B['phrase'] = B_data[2]
B['phrase_id'] = int(B_data[3])
B['C'] = []
for c_cluster_num in range(C_count):
B['C'].append({})
C = B['C'][c_cluster_num]
line += 1
C_line = inputFile.readline().strip()
C_data = C_line.split('\t')
C['timestamp'] = C_data[0]
C['frequency_in_url'] = int(C_data[1])
C['url_type'] = C_data[2]
C['url'] = C_data[3]
line += 1
# There is an empty line after each C block, except last one
emptyLine = inputFile.readline().strip()
assert emptyLine == '', "Empty line after C block not found. Found '{}' instead".format(emptyLine)
yield block
except IOError as e:
print('Encountered error: ', e, ' at line: ', line)
break
except IndexError as e:
print('Encountered index error: ', e, ' at line: ', line)
print('A_line = ', A_line)
print('B_line = ', B_line)
print('C_line = ', C_line)
break
# Skip the header of the file
for header_line in xrange(6):
inputFile.readline()
table_prefix = args.table_prefix
table_root = table_prefix + '_roots'
columns_root = '"cluster_size" INTEGER, "total_frequency" INTEGER, "root" TEXT, "cluster_id" TEXT'
insert_root = 'INSERT INTO %s VALUES (?, ?, ?, ?)' % (table_root,)
table_derivative = table_prefix + '_derivatives'
columns_derivative = '"cluster_id" INTEGER, "total_phrase_frequency" INTEGER, "num_urls" INTEGER, "phrase" TEXT, "phrase_id" INTEGER'
insert_derivative = 'INSERT INTO %s VALUES (?, ?, ?, ?, ?)' % (table_derivative,)
table_phrase_info = table_prefix + '_phrase_info'
columns_phrase_info = '"cluster_id" INTEGER, "phrase_id" INTEGER, "frequency_in_url" INTEGER, "timestamp" TEXT, "url_type" TEXT, "url" TEXT'
insert_phrase_info = 'INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?)' % (table_phrase_info,)
for table, columns in [(table_root, columns_root),
(table_derivative, columns_derivative),
(table_phrase_info, columns_phrase_info)]:
create_query = 'CREATE TABLE %s (%s)' % (table, columns)
try:
cur.execute(create_query)
logTime('Created table {}'.format(table))
    except sqlite3.OperationalError:
logTime('Skipping creation of table {}'.format(table))
blockNum = 0
try:
for block in blockReader(inputFile):
blockNum += 1
try:
cur.execute(insert_root, (block['cluster_size'],
block['total_frequency'],
block['root'],
block['cluster_id']))
for B in block['B']:
cur.execute(insert_derivative, (block['cluster_id'],
B['total_phrase_frequency'],
B['num_urls'],
B['phrase'],
B['phrase_id']))
for C in B['C']:
cur.execute(insert_phrase_info, (block['cluster_id'],
B['phrase_id'],
C['frequency_in_url'],
C['timestamp'],
C['url_type'],
C['url']))
except Exception as e:
print("Error in block %d: %s" % (blockNum, e), file=sys.stderr)
except Exception as e:
print('General error in block %d: %s' % (blockNum, e), file=sys.stderr)
logTime('Rolling back changes')
conn.rollback()
cur.close()
else:
logTime('Committing to disk')
conn.commit()
cur.close()
logTime('Finished')
| 34.453039
| 140
| 0.527101
|
from __future__ import print_function
import sqlite3
import gzip
import bz2
import sys
import argparse
import datetime as D
def logTime(chkpoint):
print('*** Checkpoint: {} at \x1b[31m{}\x1b[0m'.format(chkpoint, D.datetime.now()))
sys.stdout.flush()
argParser = argparse.ArgumentParser()
argParser.add_argument('clusterFile',
help='The file to read clusters from.')
argParser.add_argument('sqlitedb',
help='The sqlite table to fill.')
argParser.add_argument('table_prefix',
help='The prefix of table names in SQLite.')
group = argParser.add_mutually_exclusive_group()
group.add_argument('--gzip',
help='Assume file uses the gzip compression.',
action='store_true')
group.add_argument('--bz2',
help='Assume file uses bz2 compression.',
action='store_true')
args = argParser.parse_args()
if args.bz2:
inputFile = bz2.BZ2File(args.clusterFile, 'rU')
elif args.gzip:
inputFile = gzip.open(args.clusterFile, 'rU')
else:
inputFile = open(args.clusterFile, 'rU')
conn = sqlite3.connect(args.sqlitedb)
conn.text_factory = str
cur = conn.cursor()
def blockReader(inputFile):
line = 0
A_line, B_line, C_line = '', '', ''
while True:
try:
block = {}
line += 1
A_line = inputFile.readline()
if A_line == '':
break
A_data = A_line.split('\t')
B_count = int(A_data[0])
block['cluster_size'] = B_count
block['total_frequency'] = int(A_data[1])
block['root'] = A_data[2]
block['cluster_id'] = int(A_data[3])
block['B'] = []
for b_cluster_num in range(B_count):
block['B'].append({})
B = block['B'][b_cluster_num]
line += 1
B_line = inputFile.readline().strip()
B_data = B_line.split('\t')
B['total_phrase_frequency'] = int(B_data[0])
C_count = int(B_data[1])
B['num_urls'] = C_count
B['phrase'] = B_data[2]
B['phrase_id'] = int(B_data[3])
B['C'] = []
for c_cluster_num in range(C_count):
B['C'].append({})
C = B['C'][c_cluster_num]
line += 1
C_line = inputFile.readline().strip()
C_data = C_line.split('\t')
C['timestamp'] = C_data[0]
C['frequency_in_url'] = int(C_data[1])
C['url_type'] = C_data[2]
C['url'] = C_data[3]
line += 1
emptyLine = inputFile.readline().strip()
assert emptyLine == '', "Empty line after C block not found. Found '{}' instead".format(emptyLine)
yield block
except IOError as e:
print('Encountered error: ', e, ' at line: ', line)
break
except IndexError as e:
print('Encountered index error: ', e, ' at line: ', line)
print('A_line = ', A_line)
print('B_line = ', B_line)
print('C_line = ', C_line)
break
for header_line in xrange(6):
inputFile.readline()
table_prefix = args.table_prefix
table_root = table_prefix + '_roots'
columns_root = '"cluster_size" INTEGER, "total_frequency" INTEGER, "root" TEXT, "cluster_id" TEXT'
insert_root = 'INSERT INTO %s VALUES (?, ?, ?, ?)' % (table_root,)
table_derivative = table_prefix + '_derivatives'
columns_derivative = '"cluster_id" INTEGER, "total_phrase_frequency" INTEGER, "num_urls" INTEGER, "phrase" TEXT, "phrase_id" INTEGER'
insert_derivative = 'INSERT INTO %s VALUES (?, ?, ?, ?, ?)' % (table_derivative,)
table_phrase_info = table_prefix + '_phrase_info'
columns_phrase_info = '"cluster_id" INTEGER, "phrase_id" INTEGER, "frequency_in_url" INTEGER, "timestamp" TEXT, "url_type" TEXT, "url" TEXT'
insert_phrase_info = 'INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?)' % (table_phrase_info,)
for table, columns in [(table_root, columns_root),
(table_derivative, columns_derivative),
(table_phrase_info, columns_phrase_info)]:
create_query = 'CREATE TABLE %s (%s)' % (table, columns)
try:
cur.execute(create_query)
logTime('Created table {}'.format(table))
    except sqlite3.OperationalError:
logTime('Skipping creation of table {}'.format(table))
blockNum = 0
try:
for block in blockReader(inputFile):
blockNum += 1
try:
cur.execute(insert_root, (block['cluster_size'],
block['total_frequency'],
block['root'],
block['cluster_id']))
for B in block['B']:
cur.execute(insert_derivative, (block['cluster_id'],
B['total_phrase_frequency'],
B['num_urls'],
B['phrase'],
B['phrase_id']))
for C in B['C']:
cur.execute(insert_phrase_info, (block['cluster_id'],
B['phrase_id'],
C['frequency_in_url'],
C['timestamp'],
C['url_type'],
C['url']))
except Exception as e:
print("Error in block %d: %s" % (blockNum, e), file=sys.stderr)
except Exception as e:
print('General error in block %d: %s' % (blockNum, e), file=sys.stderr)
logTime('Rolling back changes')
conn.rollback()
cur.close()
else:
logTime('Committing to disk')
conn.commit()
cur.close()
logTime('Finished')
| true
| true
|
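After a run of the script above, the three tables mirror the MemeTracker hierarchy: roots, their phrase variants, and per-URL mentions. A hypothetical follow-up query for a database loaded with table_prefix 'memes' (the database file name is a placeholder):

import sqlite3

conn = sqlite3.connect('memes.sqlite')  # placeholder database file
cur = conn.cursor()

# Top phrase variants of the most frequent roots.
cur.execute("""
    SELECT r.root, d.phrase, d.total_phrase_frequency
    FROM memes_roots AS r
    JOIN memes_derivatives AS d ON d.cluster_id = r.cluster_id
    ORDER BY r.total_frequency DESC, d.total_phrase_frequency DESC
    LIMIT 10
""")
for root, phrase, freq in cur.fetchall():
    print(root, '->', phrase, freq)
conn.close()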
1c4164c5f31496240c8e2857c60e11d8b7730cbe
| 2,729
|
py
|
Python
|
IMERG_LR/main.py
|
VSCHY/download_SatPP
|
58ccffbce639496afdb54bbf41cd965f2c3b7037
|
[
"MIT"
] | 1
|
2021-12-11T07:41:14.000Z
|
2021-12-11T07:41:14.000Z
|
IMERG_LR/main.py
|
VSCHY/download_SatPP
|
58ccffbce639496afdb54bbf41cd965f2c3b7037
|
[
"MIT"
] | null | null | null |
IMERG_LR/main.py
|
VSCHY/download_SatPP
|
58ccffbce639496afdb54bbf41cd965f2c3b7037
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("./library")
import requests
from datetime import date, timedelta
import numpy as np
import os
import subprocess
import time
import multiprocessing
import random
import glob
from imerg_func import *
import download_function as down
from calendar import monthrange
import configparser
config=configparser.ConfigParser()
config.read("config.def")
output=config.get("OverAll", "output")
temp_dir=config.get("OverAll", "temp_dir")
numproc=config.getint("OverAll", "numproc", fallback = 1)
##########################################
class download:
def __init__(self, output, temp_dir):
self.output = output
self.temp_dir = temp_dir
os.chdir(self.temp_dir)
def download_month(self, year, month):
output = self.output+"{0}/{1:02}/".format(year, month)
#
if not os.path.exists(output):
os.makedirs(output)
dmon = self.output+"{0}/".format(year, month)+"3B-HHR-L.MS.MRG.3IMERG.daily.month"+str(month).zfill(2)+str(year)+".nc"
if not os.path.isfile(dmon):
d = monthrange(year, month)[1]
for i in range(1,d+1):
t0 = time.time()
if len([name for name in os.listdir(temp_dir) if os.path.isfile(name)])>0:
down.empty_directory(temp_dir)
datet = "{0}{1:02}{2:02}".format(year, month, i)
filen = output+"3B-HHR-L.MS.MRG.3IMERG."+datet+".nc"
if __name__ == "__main__":
pool = multiprocessing.Pool(processes=numproc)
if not os.path.isfile(filen):
L = down.get_HH_urls_day(year, month, i)
numfile = 0
while numfile <48:
if __name__ == "__main__":
pool.map(down.download, L)
numfile = len([name for name in os.listdir('.') if os.path.isfile(name)])
if __name__ == "__main__":
pool.close()
ds = imerg_xarray_from_date(temp_dir, "{0}-{1}-{2}".format(year,month,i), "Late")
ds.to_netcdf(path=filen, unlimited_dims = ["time"])
ds.close()
t1 = time.time()
print("FINAL TIME - {0}/{1}/{2}".format(i,month,year), int(t1-t0), "s.")
subprocess.check_call(["cdo", "mergetime",output+"*.nc", dmon])
down.empty_directory(output)
##########################################
if __name__ == "__main__":
d = download(output,temp_dir)
# Define list of years
# ex : YEAR = [2019]
YEAR = [2019]
# Define list of months
# ex : MONTH = np.arange(1,12)
MONTH = np.arange(1,2)
for y in YEAR:
for m in MONTH:
d.download_month(y, m)
| 34.544304
| 124
| 0.562111
|
import sys
sys.path.append("./library")
import requests
from datetime import date, timedelta
import numpy as np
import os
import subprocess
import time
import multiprocessing
import random
import glob
from imerg_func import *
import download_function as down
from calendar import monthrange
import configparser
config=configparser.ConfigParser()
config.read("config.def")
output=config.get("OverAll", "output")
temp_dir=config.get("OverAll", "temp_dir")
numproc=config.getint("OverAll", "numproc", fallback = 1)
if __name__ == "__main__":
pool = multiprocessing.Pool(processes=numproc)
if not os.path.isfile(filen):
L = down.get_HH_urls_day(year, month, i)
numfile = 0
while numfile <48:
if __name__ == "__main__":
pool.map(down.download, L)
numfile = len([name for name in os.listdir('.') if os.path.isfile(name)])
if __name__ == "__main__":
pool.close()
ds = imerg_xarray_from_date(temp_dir, "{0}-{1}-{2}".format(year,month,i), "Late")
ds.to_netcdf(path=filen, unlimited_dims = ["time"])
ds.close()
t1 = time.time()
print("FINAL TIME - {0}/{1}/{2}".format(i,month,year), int(t1-t0), "s.")
subprocess.check_call(["cdo", "mergetime",output+"*.nc", dmon])
down.empty_directory(output)
| true
| true
|
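The downloader above pulls everything from an [OverAll] section of config.def: output and temp_dir are required, numproc falls back to 1. Since output is concatenated directly with year/month subfolders, it should end with a slash. A minimal example file with placeholder paths:

[OverAll]
output = /data/imerg/late/
temp_dir = /data/imerg/tmp
numproc = 4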
1c4164f928ee5e3d0296312887baf66d219abfbf
| 774
|
py
|
Python
|
reader/models/entry.py
|
joyfulflyer/billboard-viewer
|
40b1c27f22858f45f0d745a216697db05a4f8e66
|
[
"MIT"
] | null | null | null |
reader/models/entry.py
|
joyfulflyer/billboard-viewer
|
40b1c27f22858f45f0d745a216697db05a4f8e66
|
[
"MIT"
] | 2
|
2021-08-09T17:18:15.000Z
|
2021-08-09T17:18:20.000Z
|
reader/models/entry.py
|
joyfulflyer/billboard-viewer
|
40b1c27f22858f45f0d745a216697db05a4f8e66
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, ForeignKey, Integer, String
from .base import Base
from .song import Song
class Entry(Base):
__tablename__ = 'entries'
id = Column(Integer, primary_key=True)
name = Column(String(256), nullable=False)
artist = Column(String(128), nullable=False)
place = Column(Integer, nullable=False)
peak_position = Column(Integer, nullable=True)
last_position = Column(Integer, nullable=True)
weeks_on_chart = Column(Integer, nullable=True)
chart_id = Column(Integer, ForeignKey("charts.id"), nullable=False)
song_id = Column(Integer, ForeignKey("%s.id" % (Song.__tablename__)))
def __repr__(self):
return "Entry: <id='%r', name='%r', artist='%r'>" % \
(self.id, self.name, self.artist)
| 33.652174
| 73
| 0.682171
|
from sqlalchemy import Column, ForeignKey, Integer, String
from .base import Base
from .song import Song
class Entry(Base):
__tablename__ = 'entries'
id = Column(Integer, primary_key=True)
name = Column(String(256), nullable=False)
artist = Column(String(128), nullable=False)
place = Column(Integer, nullable=False)
peak_position = Column(Integer, nullable=True)
last_position = Column(Integer, nullable=True)
weeks_on_chart = Column(Integer, nullable=True)
chart_id = Column(Integer, ForeignKey("charts.id"), nullable=False)
song_id = Column(Integer, ForeignKey("%s.id" % (Song.__tablename__)))
def __repr__(self):
return "Entry: <id='%r', name='%r', artist='%r'>" % \
(self.id, self.name, self.artist)
| true
| true
|
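A minimal usage sketch for the model above, against an in-memory SQLite database. Note that Song is already imported by the module itself, but creating the schema also needs the Chart model (its module path below is an assumption), and chart_id=1 is a placeholder foreign key:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from reader.models.base import Base
from reader.models.chart import Chart   # assumed module path for the Chart model
from reader.models.entry import Entry

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(Entry(name='Some Song', artist='Some Artist', place=1, chart_id=1))
session.commit()
print(session.query(Entry).filter_by(place=1).first())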
1c4166cf120940029f565f6c6ea135b19e969666
| 1,789
|
py
|
Python
|
bounties_api/std_bounties/views/issuer_leaderboard_views.py
|
tenthirtyone/BountiesAPI
|
2bb449a947d987072be24633ba36fbd67c0ab29b
|
[
"MIT"
] | 45
|
2018-03-24T21:37:59.000Z
|
2021-11-12T11:53:04.000Z
|
bounties_api/std_bounties/views/issuer_leaderboard_views.py
|
tenthirtyone/BountiesAPI
|
2bb449a947d987072be24633ba36fbd67c0ab29b
|
[
"MIT"
] | 192
|
2018-03-15T22:42:51.000Z
|
2022-02-12T11:42:20.000Z
|
bounties_api/std_bounties/views/issuer_leaderboard_views.py
|
tenthirtyone/BountiesAPI
|
2bb449a947d987072be24633ba36fbd67c0ab29b
|
[
"MIT"
] | 27
|
2018-03-23T17:12:27.000Z
|
2021-12-06T02:21:26.000Z
|
from django.http import JsonResponse
from django.db import connection
from rest_framework.views import APIView
from bounties.utils import extractInParams, limitOffsetParams, sqlGenerateOrList, dictfetchall
from std_bounties.serializers import LeaderboardIssuerSerializer
from std_bounties.queries import LEADERBOARD_ISSUER_QUERY, LEADERBOARD_ISSUER_QUERY_TOKENS
class LeaderboardIssuer(APIView):
@staticmethod
def get(request):
sql_param = ''
platform_in = extractInParams(request, 'platform', 'platform__in')
token_in = extractInParams(request, 'token', 'token__in')
startIndex, endIndex = limitOffsetParams(request)
if platform_in:
sql_param = 'AND ( '
sql_param += sqlGenerateOrList(
'fulfillment.\"platform\"', len(platform_in), '=')
sql_param += ' OR '
sql_param += sqlGenerateOrList('bounty.\"platform\"',
len(platform_in), '=')
sql_param += ' )'
platform_in = platform_in + platform_in
if token_in:
sql_param += 'AND ( '
sql_param += "bounty.\"token_contract\" = \'"
sql_param += token_in[0]
sql_param += "\')"
formatted_query = LEADERBOARD_ISSUER_QUERY_TOKENS.format(sql_param)
else:
formatted_query = LEADERBOARD_ISSUER_QUERY.format(sql_param)
cursor = connection.cursor()
cursor.execute(formatted_query, platform_in)
query_result = dictfetchall(cursor)
narrowed_result = query_result[startIndex: endIndex]
serializer = LeaderboardIssuerSerializer(narrowed_result, many=True)
return JsonResponse({'count': len(query_result), 'results': serializer.data}, safe=False)
| 44.725
| 97
| 0.658468
|
from django.http import JsonResponse
from django.db import connection
from rest_framework.views import APIView
from bounties.utils import extractInParams, limitOffsetParams, sqlGenerateOrList, dictfetchall
from std_bounties.serializers import LeaderboardIssuerSerializer
from std_bounties.queries import LEADERBOARD_ISSUER_QUERY, LEADERBOARD_ISSUER_QUERY_TOKENS
class LeaderboardIssuer(APIView):
@staticmethod
def get(request):
sql_param = ''
platform_in = extractInParams(request, 'platform', 'platform__in')
token_in = extractInParams(request, 'token', 'token__in')
startIndex, endIndex = limitOffsetParams(request)
if platform_in:
sql_param = 'AND ( '
sql_param += sqlGenerateOrList(
'fulfillment.\"platform\"', len(platform_in), '=')
sql_param += ' OR '
sql_param += sqlGenerateOrList('bounty.\"platform\"',
len(platform_in), '=')
sql_param += ' )'
platform_in = platform_in + platform_in
if token_in:
sql_param += 'AND ( '
sql_param += "bounty.\"token_contract\" = \'"
sql_param += token_in[0]
sql_param += "\')"
formatted_query = LEADERBOARD_ISSUER_QUERY_TOKENS.format(sql_param)
else:
formatted_query = LEADERBOARD_ISSUER_QUERY.format(sql_param)
cursor = connection.cursor()
cursor.execute(formatted_query, platform_in)
query_result = dictfetchall(cursor)
narrowed_result = query_result[startIndex: endIndex]
serializer = LeaderboardIssuerSerializer(narrowed_result, many=True)
return JsonResponse({'count': len(query_result), 'results': serializer.data}, safe=False)
| true
| true
|
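The platform_in doubling above is easiest to see on a concrete request. Assuming sqlGenerateOrList emits one placeholder per value joined with OR (its exact output is not part of this record), a request with platform=gitcoin,bounties builds a filter of roughly this shape:

    AND ( fulfillment."platform" = %s OR fulfillment."platform" = %s
          OR bounty."platform" = %s OR bounty."platform" = %s )

Four placeholders against two values, which is why the list is concatenated with itself before cursor.execute() binds the parameters.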
1c41671304cbc88eb8f5cf188d0401df08e5547c
| 6,002
|
py
|
Python
|
celery/utils/compat.py
|
westurner/celery
|
41964f24df4c8ca5d96bf0d644e40778212e10cb
|
[
"BSD-3-Clause"
] | 1
|
2015-02-27T00:56:02.000Z
|
2015-02-27T00:56:02.000Z
|
celery/utils/compat.py
|
westurner/celery
|
41964f24df4c8ca5d96bf0d644e40778212e10cb
|
[
"BSD-3-Clause"
] | null | null | null |
celery/utils/compat.py
|
westurner/celery
|
41964f24df4c8ca5d96bf0d644e40778212e10cb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
celery.utils.compat
~~~~~~~~~~~~~~~~~~~
Backward compatible implementations of features
only available in newer Python versions.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
############## py3k #########################################################
import sys
is_py3k = sys.version_info[0] == 3
try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa
try:
from UserList import UserList # noqa
except ImportError: # pragma: no cover
from collections import UserList # noqa
try:
from UserDict import UserDict # noqa
except ImportError: # pragma: no cover
from collections import UserDict # noqa
if is_py3k: # pragma: no cover
from io import StringIO, BytesIO
from .encoding import bytes_to_str
class WhateverIO(StringIO):
def write(self, data):
StringIO.write(self, bytes_to_str(data))
else:
from StringIO import StringIO # noqa
BytesIO = WhateverIO = StringIO # noqa
############## collections.OrderedDict ######################################
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict # noqa
############## itertools.zip_longest #######################################
try:
from itertools import izip_longest as zip_longest
except ImportError: # pragma: no cover
import itertools
def zip_longest(*args, **kwds): # noqa
fillvalue = kwds.get("fillvalue")
def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = itertools.repeat(fillvalue)
iters = [itertools.chain(it, sentinel(), fillers)
for it in args]
try:
for tup in itertools.izip(*iters):
yield tup
except IndexError:
pass
############## itertools.chain.from_iterable ################################
from itertools import chain
def _compat_chain_from_iterable(iterables): # pragma: no cover
for it in iterables:
for element in it:
yield element
try:
chain_from_iterable = getattr(chain, "from_iterable")
except AttributeError: # pragma: no cover
chain_from_iterable = _compat_chain_from_iterable
############## logging.handlers.WatchedFileHandler ##########################
import logging
import os
from stat import ST_DEV, ST_INO
import platform as _platform
if _platform.system() == "Windows": # pragma: no cover
    # Windows does not support WatchedFileHandler, so use FileHandler instead
WatchedFileHandler = logging.FileHandler
else:
try:
from logging.handlers import WatchedFileHandler
except ImportError: # pragma: no cover
class WatchedFileHandler(logging.FileHandler): # noqa
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, *args, **kwargs):
logging.FileHandler.__init__(self, *args, **kwargs)
if not os.path.exists(self.baseFilename):
self.dev, self.ino = -1, -1
else:
stat = os.stat(self.baseFilename)
self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
if not os.path.exists(self.baseFilename):
stat = None
changed = 1
else:
stat = os.stat(self.baseFilename)
changed = ((stat[ST_DEV] != self.dev) or
(stat[ST_INO] != self.ino))
if changed and self.stream is not None:
self.stream.flush()
self.stream.close()
self.stream = self._open()
if stat is None:
stat = os.stat(self.baseFilename)
self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
logging.FileHandler.emit(self, record)
############## format(int, ',d') ##########################
if sys.version_info >= (2, 7):
def format_d(i):
return format(i, ',d')
else:
def format_d(i): # noqa
s = '%d' % i
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))
| 35.099415
| 77
| 0.545485
|
from __future__ import absolute_import
| true
| true
|
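Two quick usage notes for the shims above, assuming this celery checkout is importable. format_d reproduces format(i, ',d') by slicing digit groups of three from the right, and the compat WatchedFileHandler transparently reopens its file after log rotation:

import logging

from celery.utils.compat import WatchedFileHandler, format_d

assert format_d(1234) == '1,234'
assert format_d(1234567) == '1,234,567'

logger = logging.getLogger('worker')
logger.addHandler(WatchedFileHandler('worker.log'))  # reopens the file after rotation
logger.warning('processed %s tasks', format_d(10 ** 6))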
1c4167493f0b56ba96ed1136204fbfbc16070956
| 268
|
py
|
Python
|
sshspawner/__init__.py
|
andersy005/jupyterhub-sshspawner
|
f9a7d3e79933fb09e5d1ad047dc7360650355a7e
|
[
"BSD-3-Clause"
] | null | null | null |
sshspawner/__init__.py
|
andersy005/jupyterhub-sshspawner
|
f9a7d3e79933fb09e5d1ad047dc7360650355a7e
|
[
"BSD-3-Clause"
] | null | null | null |
sshspawner/__init__.py
|
andersy005/jupyterhub-sshspawner
|
f9a7d3e79933fb09e5d1ad047dc7360650355a7e
|
[
"BSD-3-Clause"
] | null | null | null |
# flake8: noqa
from pkg_resources import DistributionNotFound, get_distribution
from sshspawner.spawner import SSHSpawner
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
__version__ = '0.0.0'
| 24.363636
| 64
| 0.791045
|
from pkg_resources import DistributionNotFound, get_distribution
from sshspawner.spawner import SSHSpawner
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
__version__ = '0.0.0'
| true
| true
|
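The try/except above is the standard pkg_resources pattern for exposing a version only when the distribution is installed; otherwise a sentinel is reported. A quick check, assuming the package and its dependencies are importable:

import sshspawner

print(sshspawner.__version__)  # '0.0.0' when running from an uninstalled checkout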
1c4167d76d465eec4113edccbecd76a3932b315b
| 1,723
|
py
|
Python
|
test/augmenter/word/test_wordnet.py
|
booltime/nlpaug
|
d21e51bacd170dcd3dddfc34a401f0215f91dbf1
|
[
"MIT"
] | 1
|
2021-09-08T09:18:02.000Z
|
2021-09-08T09:18:02.000Z
|
test/augmenter/word/test_wordnet.py
|
booltime/nlpaug
|
d21e51bacd170dcd3dddfc34a401f0215f91dbf1
|
[
"MIT"
] | null | null | null |
test/augmenter/word/test_wordnet.py
|
booltime/nlpaug
|
d21e51bacd170dcd3dddfc34a401f0215f91dbf1
|
[
"MIT"
] | null | null | null |
import unittest
import nlpaug.augmenter.word as naw
class TestWordNet(unittest.TestCase):
def test_substitute(self):
texts = [
'The quick brown fox jumps over the lazy dog'
]
aug = naw.WordNetAug()
for text in texts:
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
self.assertLess(0, len(texts))
def test_stopwords(self):
text = 'The quick brown fox jumps over the lazy dog'
aug = naw.WordNetAug(stopwords=['quick', 'brown', 'fox'])
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
def test_no_separator(self):
"""
        Pull#11: Remove separator (underscore/hyphen)
:return:
"""
texts = [
"linguistic"
]
aug = naw.WordNetAug()
for text in texts:
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
for separator in ['-', '_']:
self.assertNotIn(separator, augmented_text)
self.assertNotEqual(text, augmented_text)
self.assertLess(0, len(texts))
def test_single_word(self):
"""
        Issue#10: avoid outputs containing one-character words like 'I a'
:return:
"""
texts = [
"I work in a middle school"
]
aug = naw.WordNetAug()
for text in texts:
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
self.assertLess(0, len(texts))
| 25.716418
| 65
| 0.56065
|
import unittest
import nlpaug.augmenter.word as naw
class TestWordNet(unittest.TestCase):
def test_substitute(self):
texts = [
'The quick brown fox jumps over the lazy dog'
]
aug = naw.WordNetAug()
for text in texts:
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
self.assertLess(0, len(texts))
def test_stopwords(self):
text = 'The quick brown fox jumps over the lazy dog'
aug = naw.WordNetAug(stopwords=['quick', 'brown', 'fox'])
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
def test_no_separator(self):
texts = [
"linguistic"
]
aug = naw.WordNetAug()
for text in texts:
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
for separator in ['-', '_']:
self.assertNotIn(separator, augmented_text)
self.assertNotEqual(text, augmented_text)
self.assertLess(0, len(texts))
def test_single_word(self):
texts = [
"I work in a middle school"
]
aug = naw.WordNetAug()
for text in texts:
self.assertLess(0, len(text))
augmented_text = aug.augment(text)
self.assertNotEqual(text, augmented_text)
self.assertLess(0, len(texts))
| true
| true
|
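The behaviour these tests pin down is plain WordNet synonym substitution. A direct usage sketch (requires the nltk wordnet data to be installed; the printed line is only indicative since augmentation is stochastic):

import nlpaug.augmenter.word as naw

aug = naw.WordNetAug(stopwords=['fox'])
print(aug.augment('The quick brown fox jumps over the lazy dog'))
# e.g. 'The speedy brown fox jump over the lazy dog' (output varies per run)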
1c41694e4fca0b78923596a73b7d57d20fdf84da
| 2,063
|
py
|
Python
|
pymagnitude/third_party/allennlp/data/fields/array_field.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 1,520
|
2018-03-01T13:37:49.000Z
|
2022-03-25T11:40:20.000Z
|
pymagnitude/third_party/allennlp/data/fields/array_field.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 87
|
2018-03-03T15:12:50.000Z
|
2022-02-21T15:24:12.000Z
|
pymagnitude/third_party/allennlp/data/fields/array_field.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 121
|
2018-03-03T08:40:53.000Z
|
2022-03-16T05:19:38.000Z
|
from __future__ import absolute_import
#typing
import numpy
import torch
#overrides
from allennlp.data.fields.field import Field
class ArrayField(Field):
u"""
A class representing an array, which could have arbitrary dimensions.
A batch of these arrays are padded to the max dimension length in the batch
for each dimension.
"""
def __init__(self, array , padding_value = 0) :
self.array = array
self.padding_value = padding_value
#overrides
def get_padding_lengths(self) :
return dict((u"dimension_" + unicode(i), shape)
for i, shape in enumerate(self.array.shape))
#overrides
def as_tensor(self,
padding_lengths ,
cuda_device = -1) :
max_shape = [padding_lengths[u"dimension_{}".format(i)]
for i in range(len(padding_lengths))]
return_array = numpy.ones(max_shape, u"float32") * self.padding_value
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.shape)
if len(self.array.shape) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
tensor = torch.from_numpy(return_array)
return tensor if cuda_device == -1 else tensor.cuda(cuda_device)
#overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return ArrayField(numpy.array([], dtype=u"float32"), padding_value=self.padding_value)
def __str__(self) :
return "ArrayField with shape: {self.array.shape}."
| 37.509091
| 102
| 0.635967
|
from __future__ import absolute_import
import numpy
import torch
from allennlp.data.fields.field import Field
class ArrayField(Field):
def __init__(self, array , padding_value = 0) :
self.array = array
self.padding_value = padding_value
def get_padding_lengths(self) :
return dict((u"dimension_" + unicode(i), shape)
for i, shape in enumerate(self.array.shape))
def as_tensor(self,
padding_lengths ,
cuda_device = -1) :
max_shape = [padding_lengths[u"dimension_{}".format(i)]
for i in range(len(padding_lengths))]
return_array = numpy.ones(max_shape, u"float32") * self.padding_value
slicing_shape = list(self.array.shape)
if len(self.array.shape) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
tensor = torch.from_numpy(return_array)
return tensor if cuda_device == -1 else tensor.cuda(cuda_device)
def empty_field(self):
return ArrayField(numpy.array([], dtype=u"float32"), padding_value=self.padding_value)
def __str__(self) :
return "ArrayField with shape: {self.array.shape}."
| true
| true
|
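The padding contract of ArrayField is easiest to see with two fields of different shapes: every dimension is padded up to the batch maximum and missing cells are filled with padding_value. A small sketch that inlines the per-dimension maximum normally computed by the surrounding Batch object (the import path assumes the vendored module is importable as plain allennlp):

import numpy

from allennlp.data.fields.array_field import ArrayField  # vendored path, assumed importable

a = ArrayField(numpy.array([[1, 2], [3, 4]], dtype="float32"))  # shape (2, 2)
b = ArrayField(numpy.array([[5, 6, 7]], dtype="float32"))       # shape (1, 3)

# Per-dimension maxima over the batch, as the surrounding Batch object would compute.
lengths = {"dimension_0": 2, "dimension_1": 3}
print(a.as_tensor(lengths).shape)  # torch.Size([2, 3]); missing cells hold padding_value
print(b.as_tensor(lengths).shape)  # torch.Size([2, 3])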
1c4169d6afe30d4591c02c39ec558246c822c3b8
| 5,504
|
py
|
Python
|
zerogercrnn/lib/attn.py
|
zerogerc/rnn-autocomplete
|
39dc8dd7c431cb8ac9e15016388ec823771388e4
|
[
"Apache-2.0"
] | 7
|
2019-02-27T09:48:39.000Z
|
2021-11-30T19:01:01.000Z
|
zerogercrnn/lib/attn.py
|
ZeRoGerc/rnn-autocomplete
|
39dc8dd7c431cb8ac9e15016388ec823771388e4
|
[
"Apache-2.0"
] | null | null | null |
zerogercrnn/lib/attn.py
|
ZeRoGerc/rnn-autocomplete
|
39dc8dd7c431cb8ac9e15016388ec823771388e4
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from zerogercrnn.lib.calculation import drop_matrix_rows_3d, calc_attention_combination
from zerogercrnn.lib.core import BaseModule
from zerogercrnn.lib.utils import init_layers_uniform, get_best_device
class CyclicBuffer:
def __init__(self, buffer):
self.buffer = buffer
self.it = 0
def add_vector(self, vector):
self.buffer[:, self.it, :].copy_(vector) # TODO: general way
self.it += 1
if self.it >= self.buffer.size()[1]:
self.it = 0
def get(self):
return self.buffer
class LastKBuffer:
def __init__(self, window_len, buffer):
assert window_len <= buffer.size()[1]
self.buffer_size = buffer.size()[1]
self.window_len = window_len
self.buffer = buffer
self.it = window_len
def add_vector(self, vector):
self.buffer[:, self.it, :].copy_(vector.detach()) # TODO: general way
self.it += 1
if self.it >= self.buffer_size:
self.buffer.narrow(dim=1, start=0, length=self.window_len).copy_(
self.buffer.narrow(dim=1, start=self.buffer_size - self.window_len, length=self.window_len)
)
self.it = self.window_len
def get(self):
return self.buffer.narrow(dim=1, start=self.it - self.window_len, length=self.window_len)
class Attn(BaseModule):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, self.hidden_size)
init_layers_uniform(-0.05, 0.05, [self.attn])
# elif self.method == 'concat':
# self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
# self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))
# nn.init.uniform(self.attn.parameters(), -0.05, 0.05)
# nn.init.uniform(self.other, -0.05, 0.05)
def forward(self, main_vector, attn_vectors):
"""
:param main_vector: matrix of size [batch_size, N]
:param attn_vectors: matrix of size [batch_size, seq_len, N]
:return:
"""
seq_len = attn_vectors.size()[1]
# Calculate energies for each encoder output
attn_energies = self.score(main_vector, attn_vectors)
return F.softmax(attn_energies, dim=1)
def score(self, main_vector, attn_vectors):
"""
:param main_vector: matrix of size [batch_size, N]
:param attn_vectors: matrix of size [batch_size, seq_len, N]
:return: matrix with attention coefficients of size [batch_size, seq_len, 1]
"""
if self.method == 'dot':
pass # all is ready
elif self.method == 'general':
attn_vectors = self.attn(attn_vectors)
else:
raise Exception('Unknown attention method: {}'.format(self.method))
# main_vector [batch_size, N] -> [batch_size, 1, 1, N]
main_vector = main_vector.unsqueeze(1).unsqueeze(1)
# att_vectors [batch_size, seq_len, N, 1]
attn_vectors = attn_vectors.unsqueeze(3)
# after multiplication [batch_size, seq_len, 1, 1] -> [batch_size, seq_len, 1, 1]
energy = main_vector.matmul(attn_vectors).squeeze(-1)
return energy
# TODO: implement concat
# elif self.method == 'concat':
# energy = self.attn(torch.cat((hidden, encoder_output), 1))
# energy = self.other.dot(energy)
# return energy
class ContextAttention(BaseModule):
"""Attention layer that calculate attention of past seq_len reported inputs to the currently reported input."""
def __init__(self, context_len, hidden_size):
super().__init__()
self.seq_len = context_len
self.hidden_size = hidden_size
self.it = 0
# Layer that applies attention to past self.cntx hidden states of contexts
self.attn = Attn(method='general', hidden_size=self.hidden_size)
# Matrix that will hold past seq_len contexts. No backprop will be computed
# size: [batch_size, seq_len, hidden_size]
self.context_buffer = None
def init_hidden(self, batch_size):
b_matrix = torch.FloatTensor(batch_size, 2 * self.seq_len, self.hidden_size).to(get_best_device())
self.context_buffer = LastKBuffer(window_len=self.seq_len, buffer=b_matrix)
def forget_context_partly(self, forget_vector):
"""Method to drop context for programs that ended.
:param forget_vector vector of size [batch_size, 1] with either 0 or 1
"""
drop_matrix_rows_3d(self.context_buffer.get(), forget_vector)
def forward(self, h_t):
"""
:param h_t: current hidden state of size [batch_size, hidden_size]
:return: hidden state with applied sum attention of size [batch_size, hidden_size]
"""
assert self.context_buffer is not None
current_context = self.context_buffer.get()
attn_weights = self.attn(h_t, current_context)
# self.it += 1
# if self.it % 10000 == 0:
# print(attn_weights.data[0])
# Calc current context vector as sum of previous contexts multiplied by attention coefficients
cntx = calc_attention_combination(attn_weights, current_context)
self.context_buffer.add_vector(h_t)
return cntx
| 36.693333
| 115
| 0.640625
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from zerogercrnn.lib.calculation import drop_matrix_rows_3d, calc_attention_combination
from zerogercrnn.lib.core import BaseModule
from zerogercrnn.lib.utils import init_layers_uniform, get_best_device
class CyclicBuffer:
def __init__(self, buffer):
self.buffer = buffer
self.it = 0
def add_vector(self, vector):
self.buffer[:, self.it, :].copy_(vector)
self.it += 1
if self.it >= self.buffer.size()[1]:
self.it = 0
def get(self):
return self.buffer
class LastKBuffer:
def __init__(self, window_len, buffer):
assert window_len <= buffer.size()[1]
self.buffer_size = buffer.size()[1]
self.window_len = window_len
self.buffer = buffer
self.it = window_len
def add_vector(self, vector):
self.buffer[:, self.it, :].copy_(vector.detach())
self.it += 1
if self.it >= self.buffer_size:
self.buffer.narrow(dim=1, start=0, length=self.window_len).copy_(
self.buffer.narrow(dim=1, start=self.buffer_size - self.window_len, length=self.window_len)
)
self.it = self.window_len
def get(self):
return self.buffer.narrow(dim=1, start=self.it - self.window_len, length=self.window_len)
class Attn(BaseModule):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, self.hidden_size)
init_layers_uniform(-0.05, 0.05, [self.attn])
def forward(self, main_vector, attn_vectors):
seq_len = attn_vectors.size()[1]
attn_energies = self.score(main_vector, attn_vectors)
return F.softmax(attn_energies, dim=1)
def score(self, main_vector, attn_vectors):
if self.method == 'dot':
pass
elif self.method == 'general':
attn_vectors = self.attn(attn_vectors)
else:
raise Exception('Unknown attention method: {}'.format(self.method))
main_vector = main_vector.unsqueeze(1).unsqueeze(1)
attn_vectors = attn_vectors.unsqueeze(3)
energy = main_vector.matmul(attn_vectors).squeeze(-1)
return energy
class ContextAttention(BaseModule):
def __init__(self, context_len, hidden_size):
super().__init__()
self.seq_len = context_len
self.hidden_size = hidden_size
self.it = 0
self.attn = Attn(method='general', hidden_size=self.hidden_size)
self.context_buffer = None
def init_hidden(self, batch_size):
b_matrix = torch.FloatTensor(batch_size, 2 * self.seq_len, self.hidden_size).to(get_best_device())
self.context_buffer = LastKBuffer(window_len=self.seq_len, buffer=b_matrix)
def forget_context_partly(self, forget_vector):
drop_matrix_rows_3d(self.context_buffer.get(), forget_vector)
def forward(self, h_t):
assert self.context_buffer is not None
current_context = self.context_buffer.get()
attn_weights = self.attn(h_t, current_context)
cntx = calc_attention_combination(attn_weights, current_context)
self.context_buffer.add_vector(h_t)
return cntx
| true
| true
|
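With method='dot' the score above is a pure batched dot product between the current hidden state and each context vector, normalised by a softmax over the sequence axis. A small smoke test on random tensors, assuming BaseModule behaves like a plain torch nn.Module and the package is importable as in this record's path:

import torch

from zerogercrnn.lib.attn import Attn  # import path follows this record's layout

batch_size, seq_len, hidden = 4, 7, 16
attn = Attn(method='dot', hidden_size=hidden)

h_t = torch.randn(batch_size, hidden)               # current hidden state
context = torch.randn(batch_size, seq_len, hidden)  # past hidden states

weights = attn(h_t, context)                        # [batch_size, seq_len, 1]
print(weights.shape)
print(weights.sum(dim=1).squeeze())                 # ~1.0 per row: softmax over seq_len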
1c416a7d77536a53872baee1f19eacd7b3113bd7
| 565
|
py
|
Python
|
Screw up/kindleNetwork.py
|
Narcolapser/Little-News-Processor
|
e408ebd05f8e36f139bb413c91c4b831cd1213c7
|
[
"Apache-2.0"
] | 1
|
2016-01-14T15:06:03.000Z
|
2016-01-14T15:06:03.000Z
|
Screw up/kindleNetwork.py
|
Narcolapser/Little-News-Processor
|
e408ebd05f8e36f139bb413c91c4b831cd1213c7
|
[
"Apache-2.0"
] | null | null | null |
Screw up/kindleNetwork.py
|
Narcolapser/Little-News-Processor
|
e408ebd05f8e36f139bb413c91c4b831cd1213c7
|
[
"Apache-2.0"
] | null | null | null |
import bibleIn
import tldrnewsIn
import weatherIn
import bibleProc
import tldrnewsProc
import weatherProc
import textOut
kindle = "/media/toben/Kindle/documents/"
def run():
data = []
print("fectching bible")
data.append(bibleProc.consume(bibleIn.fetch()))
print("done. Fetching news")
data.append(tldrnewsProc.consume(tldrnewsIn.fetch()))
print("done. Fetching weather")
data.append(weatherProc.consume(weatherIn.fetch()))
print("done. outputing")
textOut.put(data,kindle+"dailyNews.txt")
print("Network complete")
if __name__ == "__main__":
run()
| 20.925926
| 54
| 0.755752
|
import bibleIn
import tldrnewsIn
import weatherIn
import bibleProc
import tldrnewsProc
import weatherProc
import textOut
kindle = "/media/toben/Kindle/documents/"
def run():
data = []
print("fectching bible")
data.append(bibleProc.consume(bibleIn.fetch()))
print("done. Fetching news")
data.append(tldrnewsProc.consume(tldrnewsIn.fetch()))
print("done. Fetching weather")
data.append(weatherProc.consume(weatherIn.fetch()))
print("done. outputing")
textOut.put(data,kindle+"dailyNews.txt")
print("Network complete")
if __name__ == "__main__":
run()
| true
| true
|
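Every stage in the pipeline above follows the same implicit contract: an *In module exposes fetch(), a matching *Proc module exposes consume(), and textOut.put() writes the collected results to the mounted Kindle. A hypothetical extra stage under that contract (module and function names are illustrative, not from this repo):

# quoteIn.py (hypothetical source module)
def fetch():
    return "Simplicity is the soul of efficiency."

# quoteProc.py (hypothetical processor module)
def consume(raw):
    return "Quote of the day: " + raw

# In kindleNetwork.run() this would add one line:
#     data.append(quoteProc.consume(quoteIn.fetch()))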
1c416ad3b001b32caf18c969dccb9449dc1f7c37
| 4,768
|
py
|
Python
|
app/main/views.py
|
Mbaire/Blog-app
|
e1f0fc620f87306c27bb84d84255e2ef186bab4f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Mbaire/Blog-app
|
e1f0fc620f87306c27bb84d84255e2ef186bab4f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Mbaire/Blog-app
|
e1f0fc620f87306c27bb84d84255e2ef186bab4f
|
[
"MIT"
] | null | null | null |
from flask import (render_template,redirect,url_for,request,flash,abort)
from . import main
from flask_bootstrap import Bootstrap
from flask_login import login_required,current_user
from ..models import User,Blog,Comment
from .forms import UpdateProfile,BlogForm,CommentForm
from .. import db,photos
from ..requests import get_random_quote
import datetime
# Views
@main.route('/')
def index():
    '''
    View root page function that returns the index page and its data
    '''
    blogs = Blog.query.all()
    quote = get_random_quote()
title = 'Natalie - Blog '
return render_template('index.html', title = title, blogs = blogs,quote = quote)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/blog/new', methods = ['GET','POST'])
@login_required
def new_blog():
blog_form = BlogForm()
if blog_form.validate_on_submit():
title = blog_form.title.data
blog = blog_form.text.data
# Updated blog instance
new_blog = Blog(blog_title=title,blog_content=blog,username=current_user.username,likes=0,dislikes=0)
# Save blog method
new_blog.save_blog()
return redirect(url_for('main.index'))
title = 'New blog'
return render_template('new_blog.html',title = title,blog_form=blog_form )
@main.route('/blog/<int:id>', methods = ['GET','POST'])
def blog(id):
blog = Blog.get_blog(id)
posted_date = blog.posted.strftime('%b %d, %Y')
if request.args.get("like"):
blog.likes = blog.likes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
elif request.args.get("dislike"):
blog.dislikes = blog.dislikes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
comment_form = CommentForm()
if comment_form.validate_on_submit():
comment = comment_form.text.data
new_comment = Comment(comment = comment,user = current_user,blog_id = blog)
new_comment.save_comment()
comments = Comment.get_comments(blog)
return render_template('blog.html', blog = blog, date = posted_date, comment_form = comment_form, comments = comments)
@main.route('/user/<uname>/blogs')
def user_blogs(uname):
user = User.query.filter_by(username=uname).first()
blogs = Blog.query.filter_by(user_id = user.id).all()
blogs_count = Blog.count_blogs(uname)
user_joined = user.date_joined.strftime('%b,%d,%y')
return render_template("profile/blogs.html",user = user, blogs = blogs, blogs_count= blogs_count,date= user_joined)
@main.route("/blog/<int:id>/update",methods = ['GET','POST'])
@login_required
def update_blog(id):
blog = Blog.query.get_or_404(id)
if blog.username != current_user.username:
abort(403)
blog_form = BlogForm()
if blog_form.validate_on_submit():
blog.blog_title = blog_form.title.data
blog.blog_content = blog_form.text.data
db.session.commit()
flash('Your blog has been updated!', 'success')
return redirect(url_for('main.blog', id=blog.id))
elif request.method == 'GET':
blog_form.title.data = blog.blog_title
blog_form.text.data = blog.blog_content
return render_template('new_blog.html',title = 'Update Blog',blog_form=blog_form )
@main.route("/blog/<int:id>/delete", methods=['POST'])
@login_required
def delete_blog(id):
blog = Blog.query.get_or_404(id)
if blog.username != current_user.username:
abort(403)
db.session.delete(blog)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.index'))
| 31.576159
| 122
| 0.67219
|
from flask import (render_template,redirect,url_for,request,flash,abort)
from . import main
from flask_bootstrap import Bootstrap
from flask_login import login_required,current_user
from ..models import User,Blog,Comment
from .forms import UpdateProfile,BlogForm,CommentForm
from .. import db,photos
from ..requests import get_random_quote
import datetime
@main.route('/')
def index():
blogs = Blog.query.all()
quote = get_random_quote()
title = 'Natalie - Blog '
return render_template('index.html', title = title, blogs = blogs,quote = quote)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/blog/new', methods = ['GET','POST'])
@login_required
def new_blog():
blog_form = BlogForm()
if blog_form.validate_on_submit():
title = blog_form.title.data
blog = blog_form.text.data
new_blog = Blog(blog_title=title,blog_content=blog,username=current_user.username,likes=0,dislikes=0)
new_blog.save_blog()
return redirect(url_for('main.index'))
title = 'New blog'
return render_template('new_blog.html',title = title,blog_form=blog_form )
@main.route('/blog/<int:id>', methods = ['GET','POST'])
def blog(id):
blog = Blog.get_blog(id)
posted_date = blog.posted.strftime('%b %d, %Y')
if request.args.get("like"):
blog.likes = blog.likes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
elif request.args.get("dislike"):
blog.dislikes = blog.dislikes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
comment_form = CommentForm()
if comment_form.validate_on_submit():
comment = comment_form.text.data
new_comment = Comment(comment = comment,user = current_user,blog_id = blog)
new_comment.save_comment()
comments = Comment.get_comments(blog)
return render_template('blog.html', blog = blog, date = posted_date, comment_form = comment_form, comments = comments)
@main.route('/user/<uname>/blogs')
def user_blogs(uname):
user = User.query.filter_by(username=uname).first()
blogs = Blog.query.filter_by(user_id = user.id).all()
blogs_count = Blog.count_blogs(uname)
user_joined = user.date_joined.strftime('%b,%d,%y')
return render_template("profile/blogs.html",user = user, blogs = blogs, blogs_count= blogs_count,date= user_joined)
@main.route("/blog/<int:id>/update",methods = ['GET','POST'])
@login_required
def update_blog(id):
blog = Blog.query.get_or_404(id)
if blog.username != current_user.username:
abort(403)
blog_form = BlogForm()
if blog_form.validate_on_submit():
blog.blog_title = blog_form.title.data
blog.blog_content = blog_form.text.data
db.session.commit()
flash('Your blog has been updated!', 'success')
return redirect(url_for('main.blog', id=blog.id))
elif request.method == 'GET':
blog_form.title.data = blog.blog_title
blog_form.text.data = blog.blog_content
return render_template('new_blog.html',title = 'Update Blog',blog_form=blog_form )
@main.route("/blog/<int:id>/delete", methods=['POST'])
@login_required
def delete_blog(id):
blog = Blog.query.get_or_404(id)
if blog.username != current_user.username:
abort(403)
db.session.delete(blog)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.index'))
| true
| true
|
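The like/dislike counters above are driven purely by query parameters on the blog route. A hypothetical client-side check with requests against a locally running instance (host, port and blog id are placeholders):

import requests

base = 'http://127.0.0.1:5000'  # placeholder development-server address
requests.get(base + '/blog/1', params={'like': 1})     # increments likes, then redirects back
requests.get(base + '/blog/1', params={'dislike': 1})  # increments dislikes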
1c416b3376e346843d4cd363d7cf4889424efc6a
| 3,515
|
py
|
Python
|
controller_manager_tests/test/multi_cm_dummy.py
|
matthew-reynolds/ros_control
|
43388ac8cc803625412b9cb71afe9e50d07ca472
|
[
"BSD-3-Clause"
] | 375
|
2015-01-15T06:48:33.000Z
|
2022-03-27T13:49:24.000Z
|
controller_manager_tests/test/multi_cm_dummy.py
|
matthew-reynolds/ros_control
|
43388ac8cc803625412b9cb71afe9e50d07ca472
|
[
"BSD-3-Clause"
] | 291
|
2015-01-07T09:39:28.000Z
|
2022-03-28T07:48:03.000Z
|
controller_manager_tests/test/multi_cm_dummy.py
|
matthew-reynolds/ros_control
|
43388ac8cc803625412b9cb71afe9e50d07ca472
|
[
"BSD-3-Clause"
] | 269
|
2015-01-15T14:06:17.000Z
|
2022-03-28T14:50:45.000Z
|
#!/usr/bin/env python
# Copyright (C) 2014, PAL Robotics S.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of PAL Robotics S.L. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
from controller_manager_tests import ControllerManagerDummy
from controller_manager_msgs.msg import ControllerState as CtrlState
from controller_manager_msgs.msg import HardwareInterfaceResources
from controller_manager_msgs.srv import ListControllersResponse, LoadController
if __name__ == '__main__':
rospy.init_node('multi_cm_dummy')
# Valid controller managers in different namespaces
cm_root = ControllerManagerDummy('/')
cm_foo1 = ControllerManagerDummy('/foo/robot/controller_manager1')
cm_foo2 = ControllerManagerDummy('/foo/robot/controller_manager2')
cm_default = ControllerManagerDummy()
ctrl_list = [
CtrlState(name='foo_controller',
state='running',
type='foo_base/foo',
claimed_resources=[
HardwareInterfaceResources(
hardware_interface='hardware_interface::FooInterface',
resources=['one', 'two', 'three'])
]),
CtrlState(name='bar_controller',
state='running',
type='bar_base/bar',
claimed_resources=[
HardwareInterfaceResources(
hardware_interface='hardware_interface::BarInterface',
resources=['four'])
])
]
resp = ListControllersResponse()
resp.controller = ctrl_list
cm_default.list_ctrl_resp = resp
# Partial controller manager ROS API: missing service
cm_incomplete = ControllerManagerDummy('/incomplete')
cm_incomplete.reload_libs.shutdown()
# Partial controller manager ROS API: service with wrong type
cm_bad_type = ControllerManagerDummy('/bad_type')
cm_bad_type.unload_ctrl.shutdown()
cm_bad_type.unload_ctrl = rospy.Service('/bad_type/unload_controller',
LoadController, # NOTE: Wrong type
cm_bad_type._unload_ctrl_cb)
rospy.spin()
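A quick sketch of how a test client might poke one of these dummies over the standard controller_manager service API (service and message names are the stock controller_manager_msgs ones; the namespace below assumes the default-constructed dummy):

# hypothetical client-side check against the default dummy manager
import rospy
from controller_manager_msgs.srv import ListControllers

rospy.init_node('cm_dummy_client')
rospy.wait_for_service('/controller_manager/list_controllers')
list_ctrls = rospy.ServiceProxy('/controller_manager/list_controllers', ListControllers)
for ctrl in list_ctrls().controller:
    print(ctrl.name, ctrl.state, ctrl.type)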
| 45.064103
| 79
| 0.712091
|
import rospy
from controller_manager_tests import ControllerManagerDummy
from controller_manager_msgs.msg import ControllerState as CtrlState
from controller_manager_msgs.msg import HardwareInterfaceResources
from controller_manager_msgs.srv import ListControllersResponse, LoadController
if __name__ == '__main__':
rospy.init_node('multi_cm_dummy')
cm_root = ControllerManagerDummy('/')
cm_foo1 = ControllerManagerDummy('/foo/robot/controller_manager1')
cm_foo2 = ControllerManagerDummy('/foo/robot/controller_manager2')
cm_default = ControllerManagerDummy()
ctrl_list = [
CtrlState(name='foo_controller',
state='running',
type='foo_base/foo',
claimed_resources=[
HardwareInterfaceResources(
hardware_interface='hardware_interface::FooInterface',
resources=['one', 'two', 'three'])
]),
CtrlState(name='bar_controller',
state='running',
type='bar_base/bar',
claimed_resources=[
HardwareInterfaceResources(
hardware_interface='hardware_interface::BarInterface',
resources=['four'])
])
]
resp = ListControllersResponse()
resp.controller = ctrl_list
cm_default.list_ctrl_resp = resp
cm_incomplete = ControllerManagerDummy('/incomplete')
cm_incomplete.reload_libs.shutdown()
cm_bad_type = ControllerManagerDummy('/bad_type')
cm_bad_type.unload_ctrl.shutdown()
cm_bad_type.unload_ctrl = rospy.Service('/bad_type/unload_controller',
LoadController,
cm_bad_type._unload_ctrl_cb)
rospy.spin()
| true
| true
|
1c416bb07c0b20dc238752ad979fbb68f6ce530c
| 657
|
py
|
Python
|
metaci/release/migrations/0020_auto_20210920_2127.py
|
sfdc-qbranch/MetaCI
|
78ac0d2bccd2db381998321ebd71029dd5d9ab39
|
[
"BSD-3-Clause"
] | 48
|
2018-10-24T14:52:06.000Z
|
2022-03-25T21:14:50.000Z
|
metaci/release/migrations/0020_auto_20210920_2127.py
|
sfdc-qbranch/MetaCI
|
78ac0d2bccd2db381998321ebd71029dd5d9ab39
|
[
"BSD-3-Clause"
] | 2,034
|
2018-10-31T20:59:16.000Z
|
2022-03-22T21:38:03.000Z
|
metaci/release/migrations/0020_auto_20210920_2127.py
|
sfdc-qbranch/MetaCI
|
78ac0d2bccd2db381998321ebd71029dd5d9ab39
|
[
"BSD-3-Clause"
] | 27
|
2018-12-24T18:16:23.000Z
|
2021-12-15T17:57:27.000Z
|
# Generated by Django 3.1.13 on 2021-09-20 21:27
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("release", "0019_release_cohort"),
]
operations = [
migrations.AlterField(
model_name="release",
name="release_cohort",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="releases",
to="release.releasecohort",
),
),
]
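For reference, a sketch of the model field this AlterField would leave in place (assumed reconstruction; the authoritative definition lives in metaci/release/models.py):

# assumed field on the Release model matching the migration above
release_cohort = models.ForeignKey(
    "release.ReleaseCohort",
    blank=True,
    default=None,
    null=True,
    on_delete=django.db.models.deletion.PROTECT,
    related_name="releases",
)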
| 24.333333
| 60
| 0.550989
|
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("release", "0019_release_cohort"),
]
operations = [
migrations.AlterField(
model_name="release",
name="release_cohort",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="releases",
to="release.releasecohort",
),
),
]
| true
| true
|
1c416bf824b73dc45e4d5fe27304f6fcc55f3728
| 4,038
|
py
|
Python
|
scrapy/http/response/text.py
|
Digenis/scrapy
|
a7b86137d03c57cc6a91fd02b56ebb76fa9f4faa
|
[
"BSD-3-Clause"
] | 1
|
2015-08-25T09:42:58.000Z
|
2015-08-25T09:42:58.000Z
|
scrapy/http/response/text.py
|
Digenis/scrapy
|
a7b86137d03c57cc6a91fd02b56ebb76fa9f4faa
|
[
"BSD-3-Clause"
] | null | null | null |
scrapy/http/response/text.py
|
Digenis/scrapy
|
a7b86137d03c57cc6a91fd02b56ebb76fa9f4faa
|
[
"BSD-3-Clause"
] | 1
|
2019-07-17T09:23:13.000Z
|
2019-07-17T09:23:13.000Z
|
"""
This module implements the TextResponse class which adds encoding handling and
discovering (through HTTP headers) to base Response class.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin
from w3lib.encoding import html_to_unicode, resolve_encoding, \
html_body_declared_encoding, http_content_type_encoding
from scrapy.http.response import Response
from scrapy.utils.response import get_base_url
from scrapy.utils.python import memoizemethod_noargs, to_native_str
class TextResponse(Response):
_DEFAULT_ENCODING = 'ascii'
def __init__(self, *args, **kwargs):
self._encoding = kwargs.pop('encoding', None)
self._cached_benc = None
self._cached_ubody = None
self._cached_selector = None
super(TextResponse, self).__init__(*args, **kwargs)
def _set_url(self, url):
if isinstance(url, six.text_type):
if six.PY2 and self.encoding is None:
raise TypeError("Cannot convert unicode url - %s "
"has no encoding" % type(self).__name__)
self._url = to_native_str(url, self.encoding)
else:
super(TextResponse, self)._set_url(url)
def _set_body(self, body):
self._body = b'' # used by encoding detection
if isinstance(body, six.text_type):
if self._encoding is None:
raise TypeError('Cannot convert unicode body - %s has no encoding' %
type(self).__name__)
self._body = body.encode(self._encoding)
else:
super(TextResponse, self)._set_body(body)
def replace(self, *args, **kwargs):
kwargs.setdefault('encoding', self.encoding)
return Response.replace(self, *args, **kwargs)
@property
def encoding(self):
return self._declared_encoding() or self._body_inferred_encoding()
def _declared_encoding(self):
return self._encoding or self._headers_encoding() \
or self._body_declared_encoding()
def body_as_unicode(self):
"""Return body as unicode"""
        # check for self.encoding before _cached_ubody just in
        # case _body_inferred_encoding is called
benc = self.encoding
if self._cached_ubody is None:
charset = 'charset=%s' % benc
self._cached_ubody = html_to_unicode(charset, self.body)[1]
return self._cached_ubody
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(get_base_url(self), url)
@memoizemethod_noargs
def _headers_encoding(self):
content_type = self.headers.get(b'Content-Type', b'')
return http_content_type_encoding(to_native_str(content_type))
def _body_inferred_encoding(self):
if self._cached_benc is None:
content_type = to_native_str(self.headers.get(b'Content-Type', b''))
benc, ubody = html_to_unicode(content_type, self.body,
auto_detect_fun=self._auto_detect_fun,
default_encoding=self._DEFAULT_ENCODING)
self._cached_benc = benc
self._cached_ubody = ubody
return self._cached_benc
def _auto_detect_fun(self, text):
for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
try:
text.decode(enc)
except UnicodeError:
continue
return resolve_encoding(enc)
@memoizemethod_noargs
def _body_declared_encoding(self):
return html_body_declared_encoding(self.body)
@property
def selector(self):
from scrapy.selector import Selector
if self._cached_selector is None:
self._cached_selector = Selector(self)
return self._cached_selector
def xpath(self, query):
return self.selector.xpath(query)
def css(self, query):
return self.selector.css(query)
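A small usage sketch, separate from the module itself, showing the encoding resolution order in practice (constructor kwargs are the standard Response ones):

# hypothetical usage: an explicit encoding kwarg wins, then the Content-Type
# header, then a <meta> declaration in the body, then auto-detection
from scrapy.http import TextResponse

resp = TextResponse(
    url='http://www.example.com/',
    headers={'Content-Type': 'text/html; charset=utf-8'},
    body=b'<html><body>ok</body></html>')
print(resp.encoding)           # 'utf-8', taken from the header
print(resp.body_as_unicode())  # body decoded with that encoding
print(resp.urljoin('/next'))   # 'http://www.example.com/next'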
| 35.421053
| 84
| 0.65478
|
import six
from six.moves.urllib.parse import urljoin
from w3lib.encoding import html_to_unicode, resolve_encoding, \
html_body_declared_encoding, http_content_type_encoding
from scrapy.http.response import Response
from scrapy.utils.response import get_base_url
from scrapy.utils.python import memoizemethod_noargs, to_native_str
class TextResponse(Response):
_DEFAULT_ENCODING = 'ascii'
def __init__(self, *args, **kwargs):
self._encoding = kwargs.pop('encoding', None)
self._cached_benc = None
self._cached_ubody = None
self._cached_selector = None
super(TextResponse, self).__init__(*args, **kwargs)
def _set_url(self, url):
if isinstance(url, six.text_type):
if six.PY2 and self.encoding is None:
raise TypeError("Cannot convert unicode url - %s "
"has no encoding" % type(self).__name__)
self._url = to_native_str(url, self.encoding)
else:
super(TextResponse, self)._set_url(url)
def _set_body(self, body):
self._body = b''
if isinstance(body, six.text_type):
if self._encoding is None:
raise TypeError('Cannot convert unicode body - %s has no encoding' %
type(self).__name__)
self._body = body.encode(self._encoding)
else:
super(TextResponse, self)._set_body(body)
def replace(self, *args, **kwargs):
kwargs.setdefault('encoding', self.encoding)
return Response.replace(self, *args, **kwargs)
@property
def encoding(self):
return self._declared_encoding() or self._body_inferred_encoding()
def _declared_encoding(self):
return self._encoding or self._headers_encoding() \
or self._body_declared_encoding()
def body_as_unicode(self):
benc = self.encoding
if self._cached_ubody is None:
charset = 'charset=%s' % benc
self._cached_ubody = html_to_unicode(charset, self.body)[1]
return self._cached_ubody
def urljoin(self, url):
return urljoin(get_base_url(self), url)
@memoizemethod_noargs
def _headers_encoding(self):
content_type = self.headers.get(b'Content-Type', b'')
return http_content_type_encoding(to_native_str(content_type))
def _body_inferred_encoding(self):
if self._cached_benc is None:
content_type = to_native_str(self.headers.get(b'Content-Type', b''))
benc, ubody = html_to_unicode(content_type, self.body,
auto_detect_fun=self._auto_detect_fun,
default_encoding=self._DEFAULT_ENCODING)
self._cached_benc = benc
self._cached_ubody = ubody
return self._cached_benc
def _auto_detect_fun(self, text):
for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
try:
text.decode(enc)
except UnicodeError:
continue
return resolve_encoding(enc)
@memoizemethod_noargs
def _body_declared_encoding(self):
return html_body_declared_encoding(self.body)
@property
def selector(self):
from scrapy.selector import Selector
if self._cached_selector is None:
self._cached_selector = Selector(self)
return self._cached_selector
def xpath(self, query):
return self.selector.xpath(query)
def css(self, query):
return self.selector.css(query)
| true
| true
|
1c416c30ca7e1171cf6de53fb05f0ececb547c54
| 434
|
py
|
Python
|
accounts/migrations/0002_alter_account_id.py
|
shaymk1/Felicia-s-ecommerce-store
|
aaf9d5aed018e451602c6c39bf8e5e24f9cedc01
|
[
"MIT"
] | null | null | null |
accounts/migrations/0002_alter_account_id.py
|
shaymk1/Felicia-s-ecommerce-store
|
aaf9d5aed018e451602c6c39bf8e5e24f9cedc01
|
[
"MIT"
] | null | null | null |
accounts/migrations/0002_alter_account_id.py
|
shaymk1/Felicia-s-ecommerce-store
|
aaf9d5aed018e451602c6c39bf8e5e24f9cedc01
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-22 14:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
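Migrations like this one are typically emitted after a Django 3.2 upgrade, where the implicit primary-key type became configurable; pinning it once in settings (assumed snippet, not from this repo) stops Django from regenerating them per app:

# settings.py (assumed): make BigAutoField the project-wide default PK type
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'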
| 22.842105
| 111
| 0.617512
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| true
| true
|
1c416c46aac908f7df53aab0aa8bef175ade8e4c
| 149
|
py
|
Python
|
test/test_queue.py
|
shivankurkapoor/moleculardating
|
4a72c3e92a09ab321e0d92840cc7619857bbab8a
|
[
"BSD-3-Clause"
] | 1
|
2018-04-24T04:38:33.000Z
|
2018-04-24T04:38:33.000Z
|
test/test_queue.py
|
shivankurkapoor/molecular-dating
|
4a72c3e92a09ab321e0d92840cc7619857bbab8a
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_queue.py
|
shivankurkapoor/molecular-dating
|
4a72c3e92a09ab321e0d92840cc7619857bbab8a
|
[
"BSD-3-Clause"
] | null | null | null |
import queue.qscript_utility
import sys
queue.qscript_utility.submit_job_into_queue("single", sys.argv[1], "%s %s %s" % ("fenv", sys.argv[1], "0"))
| 29.8
| 107
| 0.711409
|
import queue.qscript_utility
import sys
queue.qscript_utility.submit_job_into_queue("single", sys.argv[1], "%s %s %s" % ("fenv", sys.argv[1], "0"))
| true
| true
|
1c416d6019ab4b179bb84736851d12f87e2ce72f
| 712
|
py
|
Python
|
Code/INA219/INA219.py
|
OlegErmolaev/INA219_servo
|
a7e449e9d6d48b1a89283a08effb8016866ae497
|
[
"MIT"
] | null | null | null |
Code/INA219/INA219.py
|
OlegErmolaev/INA219_servo
|
a7e449e9d6d48b1a89283a08effb8016866ae497
|
[
"MIT"
] | null | null | null |
Code/INA219/INA219.py
|
OlegErmolaev/INA219_servo
|
a7e449e9d6d48b1a89283a08effb8016866ae497
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from ina219 import INA219
from ina219 import DeviceRangeError
import time
SHUNT_OHMS = 0.05
def read():
ina = INA219(SHUNT_OHMS)
ina.configure(ina.RANGE_16V)
print("Bus Voltage: %.3f V" % ina.voltage())
try:
print("Bus Current: %.3f mA" % ina.current())
print("Power: %.3f mW" % ina.power())
print("Shunt voltage: %.3f mV" % ina.shunt_voltage())
print()
except DeviceRangeError as e:
# Current out of device range with specified shunt resistor
print(e)
if __name__ == "__main__":
try:
while True:
read()
time.sleep(0.5)
except KeyboardInterrupt:
print('Ctrl+C pressed')
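If DeviceRangeError shows up regularly, pi-ina219 also supports auto-gain with an expected-current hint; a hedged variant of the configuration above (0.6 A is a placeholder for this circuit, not a measured value):

# assumed alternative setup: let the library pick the shunt gain
ina = INA219(SHUNT_OHMS, max_expected_amps=0.6)
ina.configure(ina.RANGE_16V, ina.GAIN_AUTO)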
| 24.551724
| 67
| 0.605337
|
from ina219 import INA219
from ina219 import DeviceRangeError
import time
SHUNT_OHMS = 0.05
def read():
ina = INA219(SHUNT_OHMS)
ina.configure(ina.RANGE_16V)
print("Bus Voltage: %.3f V" % ina.voltage())
try:
print("Bus Current: %.3f mA" % ina.current())
print("Power: %.3f mW" % ina.power())
print("Shunt voltage: %.3f mV" % ina.shunt_voltage())
print()
except DeviceRangeError as e:
print(e)
if __name__ == "__main__":
try:
while True:
read()
time.sleep(0.5)
except KeyboardInterrupt:
print('Ctrl+C pressed')
| true
| true
|
1c416ead727ca5614fba355f9150c4c908701582
| 27,350
|
py
|
Python
|
glitter2/analysis/export_widget.py
|
matham/glitter2
|
ebede5a18edb1b2e34f1824e4262d01a148cf2f3
|
[
"MIT"
] | null | null | null |
glitter2/analysis/export_widget.py
|
matham/glitter2
|
ebede5a18edb1b2e34f1824e4262d01a148cf2f3
|
[
"MIT"
] | null | null | null |
glitter2/analysis/export_widget.py
|
matham/glitter2
|
ebede5a18edb1b2e34f1824e4262d01a148cf2f3
|
[
"MIT"
] | null | null | null |
from typing import Type, Optional, Dict, List, Any, Tuple, Union, Callable
from functools import partial
from bisect import insort_left
from os.path import exists, join
from kivy.factory import Factory
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from kivy.properties import DictProperty, ListProperty, StringProperty, \
ObjectProperty, BooleanProperty
from kivy.clock import Clock
from kivy.app import App
from more_kivy_app.app import app_error
from glitter2.analysis import AnalysisFactory, FileDataAnalysis, \
AnalysisChannel, AnalysisSpec, default_value
from glitter2.utils import fix_name
def _sort_dict(d: dict) -> List[tuple]:
return list(sorted(d.items(), key=lambda x: x[0]))
class ExportStatsSelection(BoxLayout):
template_filename: str = ''
template_file: Optional[FileDataAnalysis] = None
template_data_file: Optional[FileDataAnalysis] = None
global_parameters: BoxLayout = None
local_parameters: BoxLayout = None
new_channels_widget: BoxLayout = None
compute_methods_widget: BoxLayout = None
new_channel_methods: Dict[str, List[str]] = DictProperty()
compute_methods: Dict[str, List[str]] = DictProperty()
methods_class: Dict[Tuple[str, str], Type[AnalysisChannel]] = {}
_update_name_width_trigger = []
src_event_channel_names: List[str] = []
src_pos_channel_names: List[str] = []
src_zone_channel_names: List[str] = []
event_channel_names: List[str] = ListProperty()
pos_channel_names: List[str] = ListProperty()
zone_channel_names: List[str] = ListProperty()
widgets_using_channel_name: Dict[
str, List['SpinnerTextContextBehavior']] = {}
def __init__(self, **kwargs):
self.new_channel_methods = {'event': [], 'pos': [], 'zone': []}
self.compute_methods = {'event': [], 'pos': [], 'zone': []}
super().__init__(**kwargs)
self.src_event_channel_names = []
self.src_pos_channel_names = []
self.src_zone_channel_names = []
self.widgets_using_channel_name = {}
def get_template_file_setter(self, callback):
def inner(paths):
callback(paths)
if paths:
self.set_template_file(paths[0])
return inner
@app_error
def set_template_file(self, filename):
self.template_filename = filename
if self.template_file is not None:
self.template_file.close_data_file()
self.template_file = None
if self.template_data_file is not None:
self.template_data_file.close_data_file()
self.template_data_file = None
# reset all the places where we refer to any of the original channels
src_names = set(
self.src_event_channel_names + self.src_pos_channel_names +
self.src_zone_channel_names)
widgets_using_channel_name = self.widgets_using_channel_name
for name, widgets in list(widgets_using_channel_name.items()):
# only clear when it refers to a source channel, not a manually
# created channel
if name not in src_names:
continue
for widget in widgets:
widget.spinner_item_reference_was_removed()
del widgets_using_channel_name[name]
if not filename:
self.src_event_channel_names = []
self.src_pos_channel_names = []
self.src_zone_channel_names = []
self.refresh_channel_names(-1)
return
try:
template = self.template_file = FileDataAnalysis(filename=filename)
template.open_data_file()
template.load_file_metadata()
self.src_event_channel_names = list(template.event_channels_data)
self.src_pos_channel_names = list(template.pos_channels_data)
self.src_zone_channel_names = list(template.zone_channels_shapes)
except Exception:
self.src_event_channel_names = []
self.src_pos_channel_names = []
self.src_zone_channel_names = []
self.refresh_channel_names(-1)
self.template_filename = ''
if self.template_file is not None:
self.template_file.close_data_file()
self.template_file = None
raise
self.refresh_channel_names(-1)
def show_analysis_options(self):
triggers = self._update_name_width_trigger = []
for container in [
self.global_parameters, self.local_parameters,
self.new_channels_widget, self.compute_methods_widget]:
triggers.append(Clock.create_trigger(
partial(self._update_name_width, container), 0))
self._show_global_variables()
self._show_local_variables()
self.get_compute_options()
def _update_name_width(self, container, *args):
children = container.children
max_width = max(
widget.name_label.texture_size[0] for widget in children)
for widget in children:
widget.name_label.parent.width = max_width
def _show_global_variables(self):
container = self.global_parameters
global_vars = AnalysisFactory.get_variables(local_vars=False)
trigger = self._update_name_width_trigger[0]
for name, (classes, doc, (var_type, _), special_type) in _sort_dict(
global_vars):
widget = VariableDocWithDefault(
name=name, classes=classes, doc=doc, var_type=var_type,
special_type=special_type
)
container.add_widget(widget)
widget.name_label.fbind('texture_size', trigger)
trigger()
def _show_local_variables(self):
container = self.local_parameters
local_vars = AnalysisFactory.get_variables(global_vars=False)
trigger = self._update_name_width_trigger[1]
for name, (classes, doc, (var_type, _), special_type) in _sort_dict(
local_vars):
widget = VariableDocNoDefault(
name=name, classes=classes, doc=doc, var_type=var_type,
special_type=special_type
)
container.add_widget(widget)
widget.name_label.fbind('texture_size', trigger)
trigger()
def get_compute_options(self):
new_methods = {}
methods_class = {}
for channel_type in ('event', 'pos', 'zone'):
items = AnalysisFactory.get_channel_creating_methods_from_type(
channel_type)
new_methods[channel_type] = list(sorted(items))
methods_class.update({
(channel_type, name): cls
for name, (cls, _, _) in items.items()
})
self.new_channel_methods = new_methods
compute_methods = {}
for channel_type in ('event', 'pos', 'zone'):
items = AnalysisFactory.get_compute_methods_from_type(channel_type)
compute_methods[channel_type] = list(sorted(items))
methods_class.update({
(channel_type, name): cls
for name, (cls, _, _) in items.items()
})
self.compute_methods = compute_methods
self.methods_class = methods_class
def add_new_channel_computation(self, channel_type, method_name):
cls = self.methods_class[(channel_type, method_name)]
doc, ret_type, create_type, variables = \
AnalysisFactory.get_channel_creating_method_spec(cls, method_name)
widget = ComputeNewChannelWidget(
export_stats=self, channel_type=channel_type, method_class=cls,
name=method_name, doc=doc, create_type=create_type,
variables=variables)
self.new_channels_widget.add_widget(widget)
self.refresh_channel_names(len(self.new_channels_widget.children) - 1)
trigger = self._update_name_width_trigger[2]
widget.name_label.fbind('texture_size', trigger)
trigger()
def add_computation(self, channel_type, method_name):
cls = self.methods_class[(channel_type, method_name)]
doc, ret_type, variables = \
AnalysisFactory.get_compute_method_spec(cls, method_name)
widget = ComputeMethodWidget(
export_stats=self, channel_type=channel_type, method_class=cls,
name=method_name, doc=doc, ret_type=ret_type, variables=variables)
self.compute_methods_widget.add_widget(widget)
trigger = self._update_name_width_trigger[3]
widget.name_label.fbind('texture_size', trigger)
trigger()
def delete_new_channel(self, widget: 'ComputeNewChannelWidget'):
index = self.new_channels_widget.children[::-1].index(widget)
self.new_channels_widget.remove_widget(widget)
widget.delete_method()
old_name = widget.new_channel_name
if not old_name:
return
# if old name was empty it'd just be an empty list
widgets_using_name = self.widgets_using_channel_name.pop(old_name, [])
for user in widgets_using_name:
user.spinner_item_reference_was_removed()
self.refresh_channel_names(index)
def delete_computation(self, widget: 'ComputeMethodWidget'):
self.compute_methods_widget.remove_widget(widget)
widget.delete_method()
def update_new_channel_name(
self, widget: 'ComputeNewChannelWidget', name: str
) -> str:
old_name = widget.new_channel_name
if old_name == name:
return name
if name:
# make sure channel with name doesn't exist yet
name = fix_name(
name, self.event_channel_names, self.pos_channel_names,
self.zone_channel_names)
widget.new_channel_name = name
# if old name was empty it'd just be an empty list
widgets_using_name = self.widgets_using_channel_name.get(old_name, [])
for user in widgets_using_name:
if name:
user.spinner_item_reference_was_renamed(name)
else:
user.spinner_item_reference_was_removed()
if name:
# there won't be widgets using the new name - we fixed it
self.widgets_using_channel_name[name] = widgets_using_name
if old_name:
self.widgets_using_channel_name[old_name] = []
index = self.new_channels_widget.children[::-1].index(widget)
self.refresh_channel_names(index)
return name
def refresh_channel_names(self, index: int):
widgets = self.new_channels_widget.children[::-1]
if index >= 0:
# always start from previous widget in case it was deleted
index -= 1
if index == -1 or not widgets:
channels = {
'event': self.src_event_channel_names,
'pos': self.src_pos_channel_names,
'zone': self.src_zone_channel_names
}
index = 0
else:
widget: Optional['ComputeWidgetBase'] = widgets[index]
channels = {
'event': widget.event_channel_names,
'pos': widget.pos_channel_names,
'zone': widget.zone_channel_names
}
if widget.new_channel_name:
items = channels[widget.create_type] = list(
channels[widget.create_type])
insort_left(items, widget.new_channel_name)
index = widgets.index(widget) + 1
for current_widget in widgets[index:]:
for create_type, items in channels.items():
setattr(current_widget, f'{create_type}_channel_names', items)
create_type = current_widget.create_type
if current_widget.new_channel_name:
items = channels[create_type] = list(channels[create_type])
insort_left(items, current_widget.new_channel_name)
for create_type, items in channels.items():
setattr(self, f'{create_type}_channel_names', items)
def use_name_of_other_channel(self, widget, old_name: str, name: str):
if old_name == name:
return
widgets_using_name = self.widgets_using_channel_name.get(old_name, [])
if widget in widgets_using_name:
widgets_using_name.remove(widget)
if name:
if name not in self.widgets_using_channel_name:
self.widgets_using_channel_name[name] = []
self.widgets_using_channel_name[name].append(widget)
def get_analysis_spec(self) -> AnalysisSpec:
spec = AnalysisSpec()
# add default variables
default_variable: VariableDocWithDefault
for default_variable in self.global_parameters.children[::-1]:
for cls in default_variable.classes:
spec.add_arg_default(
cls, default_variable.name, default_variable.value)
# add new channel methods
new_channel: ComputeNewChannelWidget
for new_channel in self.new_channels_widget.children[::-1]:
if not new_channel.new_channel_name:
raise ValueError(
'no name given for the new channel created by '
f'"{new_channel.name}"')
if not new_channel.compute_channel:
raise ValueError(
'no channel was selected for the new channel created by '
f'"{new_channel.name}"')
variables = new_channel.get_variable_values()
method = getattr(
new_channel.method_class, f'compute_{new_channel.name}')
spec.add_new_channel_computation(
new_channel.compute_channel, new_channel.new_channel_name,
method, **variables)
# add computation methods
computation: ComputeMethodWidget
for computation in self.compute_methods_widget.children[::-1]:
if not computation.compute_channels:
raise ValueError(
                    'no channel(s) were selected for the computation by '
f'"{computation.name}"')
variables = computation.get_variable_values()
method = getattr(
computation.method_class, f'compute_{computation.name}')
spec.add_computation(
computation.compute_channels, method,
compute_key=computation.export_key, **variables)
return spec
class VariableDocBase(BoxLayout):
name = ''
classes = []
doc = ''
var_type = None
special_type = ''
is_global = False
name_label: Label = None
def __init__(
self, name, classes, doc, var_type, special_type, **kwargs):
self.name = name
self.classes = classes
self.doc = doc
self.var_type = var_type
self.special_type = special_type
super().__init__(**kwargs)
class VariableDocWithDefault(VariableDocBase):
value = None
is_global = True
def set_value(self, widget, value):
self.value = value
def add_variable_widget(self, container):
widget = get_variable_widget(
None, self.var_type, self.special_type, self.set_value,
optional=True, default=True
)
if widget is not None:
container.add_widget(widget)
class VariableDocNoDefault(VariableDocBase):
pass
class ComputeWidgetBase(BoxLayout):
channel_type = ''
method_class = None
name = ''
doc = ''
variables = []
export_stats: ExportStatsSelection = None
variables_container = None
variables_values: Dict[str, Any] = {}
is_variable_optional: Dict[str, bool] = {}
def __init__(
self, export_stats: ExportStatsSelection, channel_type,
method_class, name, doc, variables, **kwargs):
self.export_stats = export_stats
self.channel_type = channel_type
self.method_class = method_class
self.name = name
self.doc = doc
self.variables = variables
self.variables_values = {}
self.is_variable_optional = {}
super().__init__(**kwargs)
self.show_variables()
def get_variable_callback(self, name, widget, value):
self.variables_values[name] = value
@property
def channel_names_context(self):
return self
def show_variables(self):
container = self.variables_container
variables_values = self.variables_values
is_variable_optional = self.is_variable_optional
for name, ((var_type, optional), special_arg) in \
_sort_dict(self.variables):
variables_values[name] = None
is_variable_optional[name] = optional
widget = get_variable_widget(
self.channel_names_context, var_type, special_arg,
partial(self.get_variable_callback, name), optional=optional,
default=False)
if widget is None:
continue
container.add_widget(
Factory.MinSizeYFlatLabel(text=f'{name}:', bold=True))
container.add_widget(widget)
def get_variable_values(self) -> Dict[str, Any]:
        # right now, None means not set. TODO: use default_value for that case
variables_values = self.variables_values
variables = self.variables
values = {}
for name, value in variables_values.items():
if value is None:
value = default_value
(var_type, optional), special_arg = variables[name]
if value is default_value and not optional:
raise ValueError(
f'Argument "{name}" for method "{self.name}" for channel '
f'type "{self.channel_type}" requires a value but none '
f'was given')
values[name] = value
return values
def delete_method(self):
# remove_variables_tracking_channel_names
variables = _sort_dict(self.variables)
widgets = self.variables_container.children[::-1][1::2]
for widget, (name, (_, special_arg)) in zip(widgets, variables):
if isinstance(widget, SpinnerListFromContext):
widget.remove_widget_refs()
elif special_arg:
assert isinstance(
widget, Factory.VariableContextSpinner), widget
widget.remove_reference_to_spinner_item()
class ComputeNewChannelWidget(ComputeWidgetBase):
event_channel_names: List[str] = ListProperty()
"""Channels available so far."""
pos_channel_names: List[str] = ListProperty()
zone_channel_names: List[str] = ListProperty()
new_channel_name: str = ''
compute_channel: Optional[str] = ''
channel_selector: 'SpinnerTextContextBehavior' = None
def __init__(self, create_type, **kwargs):
self.create_type = create_type
super().__init__(**kwargs)
def delete_method(self):
super().delete_method()
if self.channel_selector is not None:
self.channel_selector.remove_reference_to_spinner_item()
class ComputeMethodWidget(ComputeWidgetBase):
export_key: str = ''
compute_channels: Optional[List[str]] = []
channel_selector: 'SpinnerListFromContext' = None
def __init__(self, ret_type, **kwargs):
self.ret_type = ret_type
self.compute_channels = []
super().__init__(**kwargs)
@property
def channel_names_context(self):
return self.export_stats
def delete_method(self):
super().delete_method()
if self.channel_selector is not None:
self.channel_selector.remove_widget_refs()
# TODO: add common inheritance for all variables, global, local, etc
# TODO: add way to distinguish when variable is default vs None
def get_variable_widget(
context, var_type, special_arg, callback, optional=True, default=True):
def set_text_value(value):
if not value:
value = None
elif var_type == float:
value = float(value)
elif var_type == int:
value = int(value)
callback(widget, value)
kwargs = {}
optional_text = '(optional) ' if optional else 'required '
default_text = 'default ' if default else ''
hint = optional_text + default_text + 'value'
if special_arg:
if default:
# these don't have a default
return None
include_none_in_values = default or optional
if var_type == str:
widget = Factory.VariableContextSpinner(
variable_context=context,
variable_name=f'{special_arg}_channel_names',
variable_value_callback=callback,
include_none_in_values=include_none_in_values)
elif var_type == List[str]:
widget = SpinnerListFromContext(
variable_context=context,
variable_name=f'{special_arg}_channel_names',
variable_value_callback=callback,
include_none_in_values=include_none_in_values)
else:
assert False, (special_arg, var_type)
return widget
if var_type == float:
cls = Factory.ExportVariableTextInput
kwargs['input_filter'] = 'float'
kwargs['hint_text'] = hint
cb = set_text_value
elif var_type == int:
cls = Factory.ExportVariableTextInput
kwargs['input_filter'] = 'int'
kwargs['hint_text'] = hint
cb = set_text_value
elif var_type == str:
cls = Factory.ExportVariableTextInput
kwargs['input_filter'] = None
kwargs['hint_text'] = hint
cb = set_text_value
else:
return None
widget = cls(**kwargs)
widget.value_callback = cb
return widget
class ValueCallbackBehavior:
variable_value_callback: Optional[Callable] = None
# TODO: Make default use default_value and differentiate None/default
variable_value = ObjectProperty(None, allownone=True)
def __init__(
self, variable_value_callback: Optional[Callable] = None, **kwargs):
self.variable_value_callback = variable_value_callback
super().__init__(**kwargs)
self.fbind('variable_value', self._track_variable_value)
self._track_variable_value()
def _track_variable_value(self, *args):
if self.variable_value_callback is not None:
self.variable_value_callback(self, self.variable_value)
class SpinnerTextCallbackBehavior(ValueCallbackBehavior):
def _track_variable_value(self, *args):
value = self.variable_value
if value == '<none>' or not value:
value = None
if self.variable_value_callback is not None:
self.variable_value_callback(self, value)
class SpinnerFromContextBehavior:
"""Must have a values property (not set here as subclass likely has it).
"""
variable_context = None
variable_name = ''
include_none_in_values = False
def __init__(
self, variable_context=None, variable_name: str = '',
include_none_in_values=False, **kwargs):
self.variable_context = variable_context
self.variable_name = variable_name
self.include_none_in_values = include_none_in_values
super().__init__(**kwargs)
Clock.schedule_once(self.bind_variable_values_from_context)
def bind_variable_values_from_context(self, *args):
self.variable_context.fbind(
self.variable_name, self._update_values_from_context)
self._update_values_from_context()
def _update_values_from_context(self, *args):
values = getattr(self.variable_context, self.variable_name)
if self.include_none_in_values:
self.values = ['<none>'] + values
else:
self.values = values
class SpinnerTextContextBehavior:
"""Must have a text property.
It only updates the text property.
"""
export_stats: ExportStatsSelection = None
last_text = ''
def __init__(self, **kwargs):
self.export_stats = App.get_running_app().export_stats
super().__init__(**kwargs)
# register it if it has a name already
self.change_spinner_item_referenced()
def spinner_item_reference_was_removed(self):
self.last_text = '<none>'
self.text = '<none>'
def spinner_item_reference_was_renamed(self, new_name):
self.last_text = new_name
self.text = new_name
def change_spinner_item_referenced(self):
last_text = self.last_text if self.last_text != '<none>' else ''
text = self.text if self.text != '<none>' else ''
self.export_stats.use_name_of_other_channel(self, last_text, text)
self.last_text = text
def remove_reference_to_spinner_item(self):
last_text = self.last_text if self.last_text != '<none>' else ''
self.export_stats.use_name_of_other_channel(self, last_text, '')
self.last_text = ''
class SpinnerChannelNameItem(SpinnerTextContextBehavior, BoxLayout):
text = StringProperty('')
bold = BooleanProperty(False)
def spinner_item_reference_was_removed(self):
super().spinner_item_reference_was_removed()
self.parent.remove_widget(self)
def remove_reference_to_spinner_item(self):
super().remove_reference_to_spinner_item()
self.parent.remove_widget(self)
class SpinnerListFromContext(
SpinnerFromContextBehavior, ValueCallbackBehavior, BoxLayout):
values: List[str] = ListProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.fbind('values', self.update_shown_values)
def update_values_from_children(self):
children = self.ids.channel_selectors.children[::-1]
# todo: use default_value
self.variable_value = [c.text for c in children] or None
self.update_shown_values()
def remove_widget_refs(self):
self.ids.channel_selector.remove_reference_to_spinner_item()
widget: SpinnerTextContextBehavior
for widget in self.ids.channel_selectors.children:
widget.remove_reference_to_spinner_item()
def update_shown_values(self, *args):
spinner = self.ids.channel_selector
used_values = self.variable_value or []
values = [v for v in self.values if v not in used_values]
spinner.values = values
if spinner.text == '<none>' or not values or values == ['<none>']:
return
if spinner.text not in used_values:
return
if values[0] == '<none>':
spinner.text = values[1]
else:
spinner.text = values[0]
Factory.register('ValueCallbackBehavior', cls=ValueCallbackBehavior)
Factory.register(
'SpinnerTextCallbackBehavior', cls=SpinnerTextCallbackBehavior)
Factory.register('SpinnerFromContextBehavior', cls=SpinnerFromContextBehavior)
Factory.register(
'SpinnerTextContextBehavior',
cls=SpinnerTextContextBehavior)
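For orientation, a hand-assembled version of the spec that get_analysis_spec() builds from the widgets (the AnalysisSpec calls are the ones used above; the channel names, variable name, and method objects are placeholders):

# hypothetical spec mirroring what the export widgets assemble
spec = AnalysisSpec()
# a default value applied to every channel class that declares the variable
spec.add_arg_default(AnalysisChannel, 'some_variable', 1.0)
# derive a new channel from an existing one, then run a computation on it
spec.add_new_channel_computation('raw_event', 'new_event', some_channel_method)
spec.add_computation(['new_event'], some_compute_method, compute_key='my_key')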
| 33.031401
| 80
| 0.643108
|
from typing import Type, Optional, Dict, List, Any, Tuple, Union, Callable
from functools import partial
from bisect import insort_left
from os.path import exists, join
from kivy.factory import Factory
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from kivy.properties import DictProperty, ListProperty, StringProperty, \
ObjectProperty, BooleanProperty
from kivy.clock import Clock
from kivy.app import App
from more_kivy_app.app import app_error
from glitter2.analysis import AnalysisFactory, FileDataAnalysis, \
AnalysisChannel, AnalysisSpec, default_value
from glitter2.utils import fix_name
def _sort_dict(d: dict) -> List[tuple]:
return list(sorted(d.items(), key=lambda x: x[0]))
class ExportStatsSelection(BoxLayout):
template_filename: str = ''
template_file: Optional[FileDataAnalysis] = None
template_data_file: Optional[FileDataAnalysis] = None
global_parameters: BoxLayout = None
local_parameters: BoxLayout = None
new_channels_widget: BoxLayout = None
compute_methods_widget: BoxLayout = None
new_channel_methods: Dict[str, List[str]] = DictProperty()
compute_methods: Dict[str, List[str]] = DictProperty()
methods_class: Dict[Tuple[str, str], Type[AnalysisChannel]] = {}
_update_name_width_trigger = []
src_event_channel_names: List[str] = []
src_pos_channel_names: List[str] = []
src_zone_channel_names: List[str] = []
event_channel_names: List[str] = ListProperty()
pos_channel_names: List[str] = ListProperty()
zone_channel_names: List[str] = ListProperty()
widgets_using_channel_name: Dict[
str, List['SpinnerTextContextBehavior']] = {}
def __init__(self, **kwargs):
self.new_channel_methods = {'event': [], 'pos': [], 'zone': []}
self.compute_methods = {'event': [], 'pos': [], 'zone': []}
super().__init__(**kwargs)
self.src_event_channel_names = []
self.src_pos_channel_names = []
self.src_zone_channel_names = []
self.widgets_using_channel_name = {}
def get_template_file_setter(self, callback):
def inner(paths):
callback(paths)
if paths:
self.set_template_file(paths[0])
return inner
@app_error
def set_template_file(self, filename):
self.template_filename = filename
if self.template_file is not None:
self.template_file.close_data_file()
self.template_file = None
if self.template_data_file is not None:
self.template_data_file.close_data_file()
self.template_data_file = None
src_names = set(
self.src_event_channel_names + self.src_pos_channel_names +
self.src_zone_channel_names)
widgets_using_channel_name = self.widgets_using_channel_name
for name, widgets in list(widgets_using_channel_name.items()):
if name not in src_names:
continue
for widget in widgets:
widget.spinner_item_reference_was_removed()
del widgets_using_channel_name[name]
if not filename:
self.src_event_channel_names = []
self.src_pos_channel_names = []
self.src_zone_channel_names = []
self.refresh_channel_names(-1)
return
try:
template = self.template_file = FileDataAnalysis(filename=filename)
template.open_data_file()
template.load_file_metadata()
self.src_event_channel_names = list(template.event_channels_data)
self.src_pos_channel_names = list(template.pos_channels_data)
self.src_zone_channel_names = list(template.zone_channels_shapes)
except Exception:
self.src_event_channel_names = []
self.src_pos_channel_names = []
self.src_zone_channel_names = []
self.refresh_channel_names(-1)
self.template_filename = ''
if self.template_file is not None:
self.template_file.close_data_file()
self.template_file = None
raise
self.refresh_channel_names(-1)
def show_analysis_options(self):
triggers = self._update_name_width_trigger = []
for container in [
self.global_parameters, self.local_parameters,
self.new_channels_widget, self.compute_methods_widget]:
triggers.append(Clock.create_trigger(
partial(self._update_name_width, container), 0))
self._show_global_variables()
self._show_local_variables()
self.get_compute_options()
def _update_name_width(self, container, *args):
children = container.children
max_width = max(
widget.name_label.texture_size[0] for widget in children)
for widget in children:
widget.name_label.parent.width = max_width
def _show_global_variables(self):
container = self.global_parameters
global_vars = AnalysisFactory.get_variables(local_vars=False)
trigger = self._update_name_width_trigger[0]
for name, (classes, doc, (var_type, _), special_type) in _sort_dict(
global_vars):
widget = VariableDocWithDefault(
name=name, classes=classes, doc=doc, var_type=var_type,
special_type=special_type
)
container.add_widget(widget)
widget.name_label.fbind('texture_size', trigger)
trigger()
def _show_local_variables(self):
container = self.local_parameters
local_vars = AnalysisFactory.get_variables(global_vars=False)
trigger = self._update_name_width_trigger[1]
for name, (classes, doc, (var_type, _), special_type) in _sort_dict(
local_vars):
widget = VariableDocNoDefault(
name=name, classes=classes, doc=doc, var_type=var_type,
special_type=special_type
)
container.add_widget(widget)
widget.name_label.fbind('texture_size', trigger)
trigger()
def get_compute_options(self):
new_methods = {}
methods_class = {}
for channel_type in ('event', 'pos', 'zone'):
items = AnalysisFactory.get_channel_creating_methods_from_type(
channel_type)
new_methods[channel_type] = list(sorted(items))
methods_class.update({
(channel_type, name): cls
for name, (cls, _, _) in items.items()
})
self.new_channel_methods = new_methods
compute_methods = {}
for channel_type in ('event', 'pos', 'zone'):
items = AnalysisFactory.get_compute_methods_from_type(channel_type)
compute_methods[channel_type] = list(sorted(items))
methods_class.update({
(channel_type, name): cls
for name, (cls, _, _) in items.items()
})
self.compute_methods = compute_methods
self.methods_class = methods_class
def add_new_channel_computation(self, channel_type, method_name):
cls = self.methods_class[(channel_type, method_name)]
doc, ret_type, create_type, variables = \
AnalysisFactory.get_channel_creating_method_spec(cls, method_name)
widget = ComputeNewChannelWidget(
export_stats=self, channel_type=channel_type, method_class=cls,
name=method_name, doc=doc, create_type=create_type,
variables=variables)
self.new_channels_widget.add_widget(widget)
self.refresh_channel_names(len(self.new_channels_widget.children) - 1)
trigger = self._update_name_width_trigger[2]
widget.name_label.fbind('texture_size', trigger)
trigger()
def add_computation(self, channel_type, method_name):
cls = self.methods_class[(channel_type, method_name)]
doc, ret_type, variables = \
AnalysisFactory.get_compute_method_spec(cls, method_name)
widget = ComputeMethodWidget(
export_stats=self, channel_type=channel_type, method_class=cls,
name=method_name, doc=doc, ret_type=ret_type, variables=variables)
self.compute_methods_widget.add_widget(widget)
trigger = self._update_name_width_trigger[3]
widget.name_label.fbind('texture_size', trigger)
trigger()
def delete_new_channel(self, widget: 'ComputeNewChannelWidget'):
index = self.new_channels_widget.children[::-1].index(widget)
self.new_channels_widget.remove_widget(widget)
widget.delete_method()
old_name = widget.new_channel_name
if not old_name:
return
widgets_using_name = self.widgets_using_channel_name.pop(old_name, [])
for user in widgets_using_name:
user.spinner_item_reference_was_removed()
self.refresh_channel_names(index)
def delete_computation(self, widget: 'ComputeMethodWidget'):
self.compute_methods_widget.remove_widget(widget)
widget.delete_method()
def update_new_channel_name(
self, widget: 'ComputeNewChannelWidget', name: str
) -> str:
old_name = widget.new_channel_name
if old_name == name:
return name
if name:
# make sure channel with name doesn't exist yet
name = fix_name(
name, self.event_channel_names, self.pos_channel_names,
self.zone_channel_names)
widget.new_channel_name = name
widgets_using_name = self.widgets_using_channel_name.get(old_name, [])
for user in widgets_using_name:
if name:
user.spinner_item_reference_was_renamed(name)
else:
user.spinner_item_reference_was_removed()
if name:
# there won't be widgets using the new name - we fixed it
self.widgets_using_channel_name[name] = widgets_using_name
if old_name:
self.widgets_using_channel_name[old_name] = []
index = self.new_channels_widget.children[::-1].index(widget)
self.refresh_channel_names(index)
return name
def refresh_channel_names(self, index: int):
widgets = self.new_channels_widget.children[::-1]
if index >= 0:
index -= 1
if index == -1 or not widgets:
channels = {
'event': self.src_event_channel_names,
'pos': self.src_pos_channel_names,
'zone': self.src_zone_channel_names
}
index = 0
else:
widget: Optional['ComputeWidgetBase'] = widgets[index]
channels = {
'event': widget.event_channel_names,
'pos': widget.pos_channel_names,
'zone': widget.zone_channel_names
}
if widget.new_channel_name:
items = channels[widget.create_type] = list(
channels[widget.create_type])
insort_left(items, widget.new_channel_name)
index = widgets.index(widget) + 1
for current_widget in widgets[index:]:
for create_type, items in channels.items():
setattr(current_widget, f'{create_type}_channel_names', items)
create_type = current_widget.create_type
if current_widget.new_channel_name:
items = channels[create_type] = list(channels[create_type])
insort_left(items, current_widget.new_channel_name)
for create_type, items in channels.items():
setattr(self, f'{create_type}_channel_names', items)
def use_name_of_other_channel(self, widget, old_name: str, name: str):
if old_name == name:
return
widgets_using_name = self.widgets_using_channel_name.get(old_name, [])
if widget in widgets_using_name:
widgets_using_name.remove(widget)
if name:
if name not in self.widgets_using_channel_name:
self.widgets_using_channel_name[name] = []
self.widgets_using_channel_name[name].append(widget)
def get_analysis_spec(self) -> AnalysisSpec:
spec = AnalysisSpec()
default_variable: VariableDocWithDefault
for default_variable in self.global_parameters.children[::-1]:
for cls in default_variable.classes:
spec.add_arg_default(
cls, default_variable.name, default_variable.value)
new_channel: ComputeNewChannelWidget
for new_channel in self.new_channels_widget.children[::-1]:
if not new_channel.new_channel_name:
raise ValueError(
'no name given for the new channel created by '
f'"{new_channel.name}"')
if not new_channel.compute_channel:
raise ValueError(
'no channel was selected for the new channel created by '
f'"{new_channel.name}"')
variables = new_channel.get_variable_values()
method = getattr(
new_channel.method_class, f'compute_{new_channel.name}')
spec.add_new_channel_computation(
new_channel.compute_channel, new_channel.new_channel_name,
method, **variables)
computation: ComputeMethodWidget
for computation in self.compute_methods_widget.children[::-1]:
if not computation.compute_channels:
raise ValueError(
                    'no channel(s) were selected for the computation by '
f'"{computation.name}"')
variables = computation.get_variable_values()
method = getattr(
computation.method_class, f'compute_{computation.name}')
spec.add_computation(
computation.compute_channels, method,
compute_key=computation.export_key, **variables)
return spec
class VariableDocBase(BoxLayout):
name = ''
classes = []
doc = ''
var_type = None
special_type = ''
is_global = False
name_label: Label = None
def __init__(
self, name, classes, doc, var_type, special_type, **kwargs):
self.name = name
self.classes = classes
self.doc = doc
self.var_type = var_type
self.special_type = special_type
super().__init__(**kwargs)
class VariableDocWithDefault(VariableDocBase):
value = None
is_global = True
def set_value(self, widget, value):
self.value = value
def add_variable_widget(self, container):
widget = get_variable_widget(
None, self.var_type, self.special_type, self.set_value,
optional=True, default=True
)
if widget is not None:
container.add_widget(widget)
class VariableDocNoDefault(VariableDocBase):
pass
class ComputeWidgetBase(BoxLayout):
channel_type = ''
method_class = None
name = ''
doc = ''
variables = []
export_stats: ExportStatsSelection = None
variables_container = None
variables_values: Dict[str, Any] = {}
is_variable_optional: Dict[str, bool] = {}
def __init__(
self, export_stats: ExportStatsSelection, channel_type,
method_class, name, doc, variables, **kwargs):
self.export_stats = export_stats
self.channel_type = channel_type
self.method_class = method_class
self.name = name
self.doc = doc
self.variables = variables
self.variables_values = {}
self.is_variable_optional = {}
super().__init__(**kwargs)
self.show_variables()
def get_variable_callback(self, name, widget, value):
self.variables_values[name] = value
@property
def channel_names_context(self):
return self
def show_variables(self):
container = self.variables_container
variables_values = self.variables_values
is_variable_optional = self.is_variable_optional
for name, ((var_type, optional), special_arg) in \
_sort_dict(self.variables):
variables_values[name] = None
is_variable_optional[name] = optional
widget = get_variable_widget(
self.channel_names_context, var_type, special_arg,
partial(self.get_variable_callback, name), optional=optional,
default=False)
if widget is None:
continue
container.add_widget(
Factory.MinSizeYFlatLabel(text=f'{name}:', bold=True))
container.add_widget(widget)
def get_variable_values(self) -> Dict[str, Any]:
variables_values = self.variables_values
variables = self.variables
values = {}
for name, value in variables_values.items():
if value is None:
value = default_value
(var_type, optional), special_arg = variables[name]
if value is default_value and not optional:
raise ValueError(
f'Argument "{name}" for method "{self.name}" for channel '
f'type "{self.channel_type}" requires a value but none '
f'was given')
values[name] = value
return values
def delete_method(self):
variables = _sort_dict(self.variables)
widgets = self.variables_container.children[::-1][1::2]
for widget, (name, (_, special_arg)) in zip(widgets, variables):
if isinstance(widget, SpinnerListFromContext):
widget.remove_widget_refs()
elif special_arg:
assert isinstance(
widget, Factory.VariableContextSpinner), widget
widget.remove_reference_to_spinner_item()
class ComputeNewChannelWidget(ComputeWidgetBase):
event_channel_names: List[str] = ListProperty()
pos_channel_names: List[str] = ListProperty()
zone_channel_names: List[str] = ListProperty()
new_channel_name: str = ''
compute_channel: Optional[str] = ''
channel_selector: 'SpinnerTextContextBehavior' = None
def __init__(self, create_type, **kwargs):
self.create_type = create_type
super().__init__(**kwargs)
def delete_method(self):
super().delete_method()
if self.channel_selector is not None:
self.channel_selector.remove_reference_to_spinner_item()
class ComputeMethodWidget(ComputeWidgetBase):
export_key: str = ''
compute_channels: Optional[List[str]] = []
channel_selector: 'SpinnerListFromContext' = None
def __init__(self, ret_type, **kwargs):
self.ret_type = ret_type
self.compute_channels = []
super().__init__(**kwargs)
@property
def channel_names_context(self):
return self.export_stats
def delete_method(self):
super().delete_method()
if self.channel_selector is not None:
self.channel_selector.remove_widget_refs()
def get_variable_widget(
context, var_type, special_arg, callback, optional=True, default=True):
def set_text_value(value):
if not value:
value = None
elif var_type == float:
value = float(value)
elif var_type == int:
value = int(value)
callback(widget, value)
kwargs = {}
optional_text = '(optional) ' if optional else 'required '
default_text = 'default ' if default else ''
hint = optional_text + default_text + 'value'
if special_arg:
if default:
return None
include_none_in_values = default or optional
if var_type == str:
widget = Factory.VariableContextSpinner(
variable_context=context,
variable_name=f'{special_arg}_channel_names',
variable_value_callback=callback,
include_none_in_values=include_none_in_values)
elif var_type == List[str]:
widget = SpinnerListFromContext(
variable_context=context,
variable_name=f'{special_arg}_channel_names',
variable_value_callback=callback,
include_none_in_values=include_none_in_values)
else:
assert False, (special_arg, var_type)
return widget
if var_type == float:
cls = Factory.ExportVariableTextInput
kwargs['input_filter'] = 'float'
kwargs['hint_text'] = hint
cb = set_text_value
elif var_type == int:
cls = Factory.ExportVariableTextInput
kwargs['input_filter'] = 'int'
kwargs['hint_text'] = hint
cb = set_text_value
elif var_type == str:
cls = Factory.ExportVariableTextInput
kwargs['input_filter'] = None
kwargs['hint_text'] = hint
cb = set_text_value
else:
return None
widget = cls(**kwargs)
widget.value_callback = cb
return widget
class ValueCallbackBehavior:
variable_value_callback: Optional[Callable] = None
# TODO: Make default use default_value and differentiate None/default
variable_value = ObjectProperty(None, allownone=True)
def __init__(
self, variable_value_callback: Optional[Callable] = None, **kwargs):
self.variable_value_callback = variable_value_callback
super().__init__(**kwargs)
self.fbind('variable_value', self._track_variable_value)
self._track_variable_value()
def _track_variable_value(self, *args):
if self.variable_value_callback is not None:
self.variable_value_callback(self, self.variable_value)
class SpinnerTextCallbackBehavior(ValueCallbackBehavior):
def _track_variable_value(self, *args):
value = self.variable_value
if value == '<none>' or not value:
value = None
if self.variable_value_callback is not None:
self.variable_value_callback(self, value)
class SpinnerFromContextBehavior:
variable_context = None
variable_name = ''
include_none_in_values = False
def __init__(
self, variable_context=None, variable_name: str = '',
include_none_in_values=False, **kwargs):
self.variable_context = variable_context
self.variable_name = variable_name
self.include_none_in_values = include_none_in_values
super().__init__(**kwargs)
Clock.schedule_once(self.bind_variable_values_from_context)
def bind_variable_values_from_context(self, *args):
self.variable_context.fbind(
self.variable_name, self._update_values_from_context)
self._update_values_from_context()
def _update_values_from_context(self, *args):
values = getattr(self.variable_context, self.variable_name)
if self.include_none_in_values:
self.values = ['<none>'] + values
else:
self.values = values
class SpinnerTextContextBehavior:
export_stats: ExportStatsSelection = None
last_text = ''
def __init__(self, **kwargs):
self.export_stats = App.get_running_app().export_stats
super().__init__(**kwargs)
# register it if it has a name already
self.change_spinner_item_referenced()
def spinner_item_reference_was_removed(self):
self.last_text = '<none>'
self.text = '<none>'
def spinner_item_reference_was_renamed(self, new_name):
self.last_text = new_name
self.text = new_name
def change_spinner_item_referenced(self):
last_text = self.last_text if self.last_text != '<none>' else ''
text = self.text if self.text != '<none>' else ''
self.export_stats.use_name_of_other_channel(self, last_text, text)
self.last_text = text
def remove_reference_to_spinner_item(self):
last_text = self.last_text if self.last_text != '<none>' else ''
self.export_stats.use_name_of_other_channel(self, last_text, '')
self.last_text = ''
class SpinnerChannelNameItem(SpinnerTextContextBehavior, BoxLayout):
text = StringProperty('')
bold = BooleanProperty(False)
def spinner_item_reference_was_removed(self):
super().spinner_item_reference_was_removed()
self.parent.remove_widget(self)
def remove_reference_to_spinner_item(self):
super().remove_reference_to_spinner_item()
self.parent.remove_widget(self)
class SpinnerListFromContext(
SpinnerFromContextBehavior, ValueCallbackBehavior, BoxLayout):
values: List[str] = ListProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.fbind('values', self.update_shown_values)
def update_values_from_children(self):
children = self.ids.channel_selectors.children[::-1]
# todo: use default_value
self.variable_value = [c.text for c in children] or None
self.update_shown_values()
def remove_widget_refs(self):
self.ids.channel_selector.remove_reference_to_spinner_item()
widget: SpinnerTextContextBehavior
for widget in self.ids.channel_selectors.children:
widget.remove_reference_to_spinner_item()
def update_shown_values(self, *args):
spinner = self.ids.channel_selector
used_values = self.variable_value or []
values = [v for v in self.values if v not in used_values]
spinner.values = values
if spinner.text == '<none>' or not values or values == ['<none>']:
return
if spinner.text not in used_values:
return
if values[0] == '<none>':
spinner.text = values[1]
else:
spinner.text = values[0]
Factory.register('ValueCallbackBehavior', cls=ValueCallbackBehavior)
Factory.register(
'SpinnerTextCallbackBehavior', cls=SpinnerTextCallbackBehavior)
Factory.register('SpinnerFromContextBehavior', cls=SpinnerFromContextBehavior)
Factory.register(
'SpinnerTextContextBehavior',
cls=SpinnerTextContextBehavior)
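# --- Hedged usage sketch (not part of the original file): assumes Kivy is
# installed; `_DemoValue` and `_on_value` are illustrative names. The
# ObjectProperty declared on ValueCallbackBehavior is registered once an
# EventDispatcher subclass mixes it in, so a bare dispatcher shows the wiring.
from kivy.event import EventDispatcher
class _DemoValue(ValueCallbackBehavior, EventDispatcher):
    pass
def _on_value(widget, value):
    print('variable_value is now:', value)
_demo = _DemoValue(variable_value_callback=_on_value)  # fires once with None
_demo.variable_value = 42                              # fires again with 42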
| true
| true
|
1c416f457472d08d2f9a3fddfc3e144d9ac07bc8
| 400
|
py
|
Python
|
example.py
|
julianogv/investpy2
|
8966b079539f4ca5bf42fcd23e8c8dce6c29aa36
|
[
"MIT"
] | null | null | null |
example.py
|
julianogv/investpy2
|
8966b079539f4ca5bf42fcd23e8c8dce6c29aa36
|
[
"MIT"
] | null | null | null |
example.py
|
julianogv/investpy2
|
8966b079539f4ca5bf42fcd23e8c8dce6c29aa36
|
[
"MIT"
] | null | null | null |
import investpy
res = investpy.search_quotes(text='EIMI', countries=['united kingdom'])
res = res[0] if isinstance(res, list) else res # pick the first result when several match
hist = investpy.get_stock_historical_data(stock=res.symbol, name=res.name, country=res.country, stock_currency='USD',
id_=res.id_, from_date='01/01/2021', to_date='13/04/2021', as_json=True)
print(hist)
| 44.444444
| 117
| 0.6675
|
import investpy
res = investpy.search_quotes(text='EIMI', countries=['united kingdom'])
res = res[0] if isinstance(res, list) else res
hist = investpy.get_stock_historical_data(stock=res.symbol, name=res.name, country=res.country, stock_currency='USD',
id_=res.id_, from_date='01/01/2021', to_date='13/04/2021', as_json=True)
print(hist)
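# --- Hedged variant (assumption: the investpy2 fork keeps upstream
# investpy's SearchObj API, where a search result fetches its own history
# via retrieve_historical_data; dates mirror the example above).
res2 = investpy.search_quotes(text='EIMI', countries=['united kingdom'])
res2 = res2[0] if isinstance(res2, list) else res2
df = res2.retrieve_historical_data(from_date='01/01/2021', to_date='13/04/2021')
print(df.head())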
| true
| true
|
1c4171607f92874d2574726ebd569d40c20b9946
| 280
|
py
|
Python
|
Part_2_intermediate/mod_1/lesson_7/ex_3_import_as/school/promote.py
|
Mikma03/InfoShareacademy_Python_Courses
|
3df1008c8c92831bebf1625f960f25b39d6987e6
|
[
"MIT"
] | null | null | null |
Part_2_intermediate/mod_1/lesson_7/ex_3_import_as/school/promote.py
|
Mikma03/InfoShareacademy_Python_Courses
|
3df1008c8c92831bebf1625f960f25b39d6987e6
|
[
"MIT"
] | null | null | null |
Part_2_intermediate/mod_1/lesson_7/ex_3_import_as/school/promote.py
|
Mikma03/InfoShareacademy_Python_Courses
|
3df1008c8c92831bebf1625f960f25b39d6987e6
|
[
"MIT"
] | null | null | null |
from estudent.school import promotion_status
FAILED_GRADE = 1
def check_promotion(subjects_final_grades):
for subject, grade in subjects_final_grades.items():
if grade == FAILED_GRADE:
return promotion_status.FAILED
return promotion_status.PASSED
| 21.538462
| 56
| 0.75
|
from estudent.school import promotion_status
FAILED_GRADE = 1
def check_promotion(subjects_final_grades):
for subject, grade in subjects_final_grades.items():
if grade == FAILED_GRADE:
return promotion_status.FAILED
return promotion_status.PASSED
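# --- Hedged usage sketch (the grade dicts are made up): any grade equal
# to FAILED_GRADE blocks promotion, anything else passes.
print(check_promotion({'math': 4, 'history': 1}))  # promotion_status.FAILED
print(check_promotion({'math': 4, 'history': 3}))  # promotion_status.PASSED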
| true
| true
|
1c4172d29421db4b828034da6fb4f48e9971b40d
| 849
|
py
|
Python
|
kubernetes/test/test_discovery_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_discovery_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_discovery_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.discovery_api import DiscoveryApi # noqa: E501
from kubernetes.client.rest import ApiException
class TestDiscoveryApi(unittest.TestCase):
"""DiscoveryApi unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.discovery_api.DiscoveryApi() # noqa: E501
def tearDown(self):
pass
def test_get_api_group(self):
"""Test case for get_api_group
"""
pass
if __name__ == '__main__':
unittest.main()
| 21.225
| 124
| 0.705536
|
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.discovery_api import DiscoveryApi
from kubernetes.client.rest import ApiException
class TestDiscoveryApi(unittest.TestCase):
def setUp(self):
self.api = kubernetes.client.api.discovery_api.DiscoveryApi()
def tearDown(self):
pass
def test_get_api_group(self):
pass
if __name__ == '__main__':
unittest.main()
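# --- Hedged sketch of how the generated stub is typically filled in
# (assumptions: a reachable cluster, kubeconfig loaded first, and the
# asserted field is illustrative rather than taken from this package):
#
#     import kubernetes.config
#     kubernetes.config.load_kube_config()
#     group = self.api.get_api_group()
#     self.assertEqual(group.kind, 'APIGroup')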
| true
| true
|
1c417417a4317b7aa66ae1ad89506f681b25cbf7
| 6,003
|
py
|
Python
|
loadlimit/util.py
|
arielmakestuff/loadlimit
|
70d3d23eecfe7922699098ea4901cc8673d14576
|
[
"MIT"
] | null | null | null |
loadlimit/util.py
|
arielmakestuff/loadlimit
|
70d3d23eecfe7922699098ea4901cc8673d14576
|
[
"MIT"
] | 5
|
2016-11-28T16:59:44.000Z
|
2017-03-18T17:10:40.000Z
|
loadlimit/util.py
|
arielmakestuff/loadlimit
|
70d3d23eecfe7922699098ea4901cc8673d14576
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# loadlimit/util.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Utility objects and functions"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import argparse
from collections import ChainMap
from collections.abc import Sequence
from enum import Enum
from functools import partial
import json
import logging
# Third-party imports
from pandas import Timestamp
from pytz import UTC
# Local imports
# ============================================================================
# Globals
# ============================================================================
LogLevel = Enum('LogLevel', [(k, v) for k, v in logging._nameToLevel.items()
if k not in ['WARN', 'NOTSET']])
TZ_UTC = UTC
# ============================================================================
# Date utils
# ============================================================================
def now(tzinfo=None):
"""Generate the current datetime.
Defaults to UTC timezone.
"""
tzinfo = 'UTC' if tzinfo is None else tzinfo
return Timestamp.now(tz=tzinfo)
# ============================================================================
# Namespace
# ============================================================================
class Namespace(argparse.Namespace):
"""Namespace extended with bool check
The bool check is to report whether the namespace is empty or not
"""
def __bool__(self):
"""Return True if attributes are being stored"""
return self != self.__class__()
# ============================================================================
# Async Iterator
# ============================================================================
class AsyncIterator:
"""Async wrapper around a non-async iterator"""
def __init__(self, obj):
self._it = iter(obj)
def __aiter__(self):
return self
async def __anext__(self):
try:
value = next(self._it)
except StopIteration:
raise StopAsyncIteration
return value
aiter = AsyncIterator
async def ageniter(gen):
"""Wrap an iterator in an async generator"""
for item in gen:
yield item
# ============================================================================
# Logger
# ============================================================================
class Logger:
"""Help make logging easier"""
__slots__ = ('_logger', '_kwargs', '_lognames')
def __init__(self, *, logger=None, name=None):
if name is None:
name = __name__.partition('.')[0]
self._logger = (logging.getLogger(name) if logger is None
else logger)
self._kwargs = {}
self._lognames = frozenset(l.name.lower() for l in LogLevel)
def __getattr__(self, name):
"""Return a logger method"""
        if name not in self._lognames:
            # AttributeError keeps getattr()/hasattr() semantics intact
            raise AttributeError('Unknown log function name: {}'.format(name))
level = getattr(LogLevel, name.upper())
return partial(self.log, level)
def log(self, level, message, *args, exc_info=None, **kwargs):
"""Log message at the given level"""
if not isinstance(level, LogLevel):
msg = ('level expected LogLevel, got {} instead'.
format(type(level).__name__))
raise TypeError(msg)
kwargs = ChainMap(kwargs, self._kwargs)
logger = self._logger
func = getattr(logger, level.name.lower())
func = func if exc_info is None else partial(func, exc_info=exc_info)
msg = message.format(*args, **kwargs)
func(msg)
@property
def msgkwargs(self):
"""Update message kwargs"""
return self._kwargs
@property
def logger(self):
"""Return the underlying logger object"""
return self._logger
# ============================================================================
# Event container
# ============================================================================
EventType = Enum('EventType', ['start', 'init_start', 'init_end',
'warmup_start', 'warmup_end', 'end'])
class Event(Sequence):
__slots__ = ('_val', )
def __init__(self, event_type, timestamp=None, *, logger=None):
if not isinstance(event_type, EventType):
msg = 'event_type arg expected {} object, got {} object instead'
raise TypeError(msg.format(EventType.__name__,
type(event_type).__name__))
if timestamp is None:
timestamp = now()
if not isinstance(timestamp, Timestamp):
msg = 'timestamp arg expected {} object, got {} object instead'
raise TypeError(msg.format(Timestamp.__name__,
type(timestamp).__name__))
if not isinstance(logger, (type(None), Logger)):
msg = 'logger arg expected {} object, got {} object instead'
raise TypeError(msg.format(Logger.__name__, type(logger).__name__))
self._val = (event_type, timestamp)
if logger is not None:
msg = dict(name=event_type.name, timestamp=str(timestamp))
logger.info('EVENT: {}', json.dumps(msg))
def __getitem__(self, key):
return self._val[key]
def __len__(self):
return len(self._val)
@property
def type(self):
"""Return the event type"""
return self._val[0]
@property
def timestamp(self):
"""Return the event timestamp"""
return self._val[1]
# ============================================================================
#
# ============================================================================
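# --- Hedged usage sketch (stdlib only): both helpers above lift a plain
# iterable into `async for`.
import asyncio
async def _consume():
    async for item in aiter([1, 2, 3]):
        print('aiter ->', item)
    async for item in ageniter(range(3)):
        print('ageniter ->', item)
asyncio.run(_consume())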
| 28.860577
| 79
| 0.479927
|
import argparse
from collections import ChainMap
from collections.abc import Sequence
from enum import Enum
from functools import partial
import json
import logging
from pandas import Timestamp
from pytz import UTC
LogLevel = Enum('LogLevel', [(k, v) for k, v in logging._nameToLevel.items()
if k not in ['WARN', 'NOTSET']])
TZ_UTC = UTC
def now(tzinfo=None):
tzinfo = 'UTC' if tzinfo is None else tzinfo
return Timestamp.now(tz=tzinfo)
class Namespace(argparse.Namespace):
def __bool__(self):
return self != self.__class__()
class AsyncIterator:
def __init__(self, obj):
self._it = iter(obj)
def __aiter__(self):
return self
async def __anext__(self):
try:
value = next(self._it)
except StopIteration:
raise StopAsyncIteration
return value
aiter = AsyncIterator
async def ageniter(gen):
for item in gen:
yield item
class Logger:
__slots__ = ('_logger', '_kwargs', '_lognames')
def __init__(self, *, logger=None, name=None):
if name is None:
name = __name__.partition('.')[0]
self._logger = (logging.getLogger(name) if logger is None
else logger)
self._kwargs = {}
self._lognames = frozenset(l.name.lower() for l in LogLevel)
def __getattr__(self, name):
if name not in self._lognames:
            raise AttributeError('Unknown log function name: {}'.format(name))
level = getattr(LogLevel, name.upper())
return partial(self.log, level)
def log(self, level, message, *args, exc_info=None, **kwargs):
if not isinstance(level, LogLevel):
msg = ('level expected LogLevel, got {} instead'.
format(type(level).__name__))
raise TypeError(msg)
kwargs = ChainMap(kwargs, self._kwargs)
logger = self._logger
func = getattr(logger, level.name.lower())
func = func if exc_info is None else partial(func, exc_info=exc_info)
msg = message.format(*args, **kwargs)
func(msg)
@property
def msgkwargs(self):
return self._kwargs
@property
def logger(self):
return self._logger
EventType = Enum('EventType', ['start', 'init_start', 'init_end',
'warmup_start', 'warmup_end', 'end'])
class Event(Sequence):
__slots__ = ('_val', )
def __init__(self, event_type, timestamp=None, *, logger=None):
if not isinstance(event_type, EventType):
msg = 'event_type arg expected {} object, got {} object instead'
raise TypeError(msg.format(EventType.__name__,
type(event_type).__name__))
if timestamp is None:
timestamp = now()
if not isinstance(timestamp, Timestamp):
msg = 'timestamp arg expected {} object, got {} object instead'
raise TypeError(msg.format(Timestamp.__name__,
type(timestamp).__name__))
if not isinstance(logger, (type(None), Logger)):
msg = 'logger arg expected {} object, got {} object instead'
raise TypeError(msg.format(Logger.__name__, type(logger).__name__))
self._val = (event_type, timestamp)
if logger is not None:
msg = dict(name=event_type.name, timestamp=str(timestamp))
logger.info('EVENT: {}', json.dumps(msg))
def __getitem__(self, key):
return self._val[key]
def __len__(self):
return len(self._val)
@property
def type(self):
return self._val[0]
@property
def timestamp(self):
return self._val[1]
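# --- Hedged usage sketch (logging setup is illustrative): Logger proxies
# level names to log(), and Event records a typed, timestamped marker,
# optionally logging it as JSON.
logging.basicConfig(level=logging.INFO)
demo_log = Logger(name='demo')
demo_log.info('hello {}', 'world')  # str.format-style message
evt = Event(EventType.start, logger=demo_log)
print(evt.type, evt.timestamp)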
| true
| true
|
1c4174a9a2d7e5bac4683caf8717aa9d50040e27
| 145
|
py
|
Python
|
apache/apr_strings.py
|
GrahamDumpleton-abandoned/apswigpy
|
b821b94a78ceed5b8991f7c345aeeadca3729a90
|
[
"Apache-2.0"
] | null | null | null |
apache/apr_strings.py
|
GrahamDumpleton-abandoned/apswigpy
|
b821b94a78ceed5b8991f7c345aeeadca3729a90
|
[
"Apache-2.0"
] | null | null | null |
apache/apr_strings.py
|
GrahamDumpleton-abandoned/apswigpy
|
b821b94a78ceed5b8991f7c345aeeadca3729a90
|
[
"Apache-2.0"
] | null | null | null |
import apache
if apache.version == (2, 2):
from apache22.apr_strings import *
else:
raise RuntimeError('Apache version not supported.')
| 20.714286
| 55
| 0.717241
|
import apache
if apache.version == (2, 2):
from apache22.apr_strings import *
else:
raise RuntimeError('Apache version not supported.')
| true
| true
|
1c41757ec6aa6ba62ffac192081d3aa8fce2cbf2
| 21,768
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_express_route_connections_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_express_route_connections_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_express_route_connections_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteConnectionsOperations:
"""ExpressRouteConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs: Any
) -> "_models.ExpressRouteConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(put_express_route_connection_parameters, 'ExpressRouteConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteConnection"]:
"""Creates a connection between an ExpressRoute gateway and an ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:param put_express_route_connection_parameters: Parameters required in an
ExpressRouteConnection PUT operation.
:type put_express_route_connection_parameters: ~azure.mgmt.network.v2020_08_01.models.ExpressRouteConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.ExpressRouteConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
put_express_route_connection_parameters=put_express_route_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ExpressRouteConnection":
"""Gets the specified ExpressRouteConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the ExpressRoute connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ExpressRouteConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a connection to a ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def list(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs: Any
) -> "_models.ExpressRouteConnectionList":
"""Lists ExpressRouteConnections.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnectionList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ExpressRouteConnectionList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnectionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnectionList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections'} # type: ignore
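# --- Hedged usage sketch (assumptions: azure-identity is installed;
# SUB_ID, RG and the gateway/connection names are placeholders; the
# NetworkManagementClient entry point attaches this operations class):
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async def create_connection(parameters):
#         async with NetworkManagementClient(DefaultAzureCredential(), SUB_ID) as client:
#             poller = await client.express_route_connections.begin_create_or_update(
#                 RG, 'er-gateway', 'er-connection', parameters)
#             return await poller.result()  # waits for the LRO to complete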
| 52.453012
| 250
| 0.692301
|
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteConnectionsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs: Any
) -> "_models.ExpressRouteConnection":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._create_or_update_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(put_express_route_connection_parameters, 'ExpressRouteConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'}
async def begin_create_or_update(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteConnection"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
put_express_route_connection_parameters=put_express_route_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'}
async def get(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ExpressRouteConnection":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'}
async def _delete_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'}
async def begin_delete(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'}
async def list(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs: Any
) -> "_models.ExpressRouteConnectionList":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnectionList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections'}
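# --- Hedged sketch of the continuation-token flow the begin_* docstrings
# above describe (client and names are placeholders): the token lets a
# poller be persisted and resumed later.
#
#     poller = await client.express_route_connections.begin_delete(RG, GW, CONN)
#     token = poller.continuation_token()  # persist this string somewhere
#     resumed = await client.express_route_connections.begin_delete(
#         RG, GW, CONN, continuation_token=token)
#     await resumed.result()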
| true
| true
|
1c4176014c51bfce06a5403ed939263bcadfe2e2
| 40,754
|
py
|
Python
|
tests/file_storage/tests.py
|
imjvdn/scratch-game-1
|
5dffd79f17e0b66d3d2e57262749311aca28e850
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 5
|
2019-10-17T21:29:53.000Z
|
2021-06-23T16:27:02.000Z
|
tests/file_storage/tests.py
|
imjvdn/scratch-game-1
|
5dffd79f17e0b66d3d2e57262749311aca28e850
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 7
|
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
tests/file_storage/tests.py
|
imjvdn/scratch-game-1
|
5dffd79f17e0b66d3d2e57262749311aca28e850
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 11
|
2019-09-14T20:57:30.000Z
|
2022-01-19T17:59:26.000Z
|
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
        get_storage_class raises an error if the requested import doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
get_storage_class('storage.NonexistentStorage')
def test_get_nonexistent_storage_class(self):
"""
        get_storage_class raises an error if the requested class doesn't exist.
"""
with self.assertRaises(ImportError):
get_storage_class('django.core.files.storage.NonexistentStorage')
def test_get_nonexistent_storage_module(self):
"""
        get_storage_class raises an error if the requested module doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
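# --- Hedged illustration (runnable wherever Django is importable) of the
# behavior pinned down above: a dotted path resolves to the storage class
# itself, while bad paths surface as ImportError.
assert get_storage_class(
    'django.core.files.storage.FileSystemStorage') is FileSystemStorage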
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, ())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
"""
FileSystemStorage.__init__() shouldn't evaluate base_url.
"""
storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
        An empty location falls back to the current working directory.
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists('test.file.tz.on'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.on', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists('test.file.tz.off'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.off', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
"""
        File storage returns a datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file', ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
        File storage returns the full path of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
# should encode special chars except ~!*()'
        # like the encodeURIComponent() JavaScript function does
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
# #25905: remove leading slashes from file names to prevent unsafe url output
self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
"""
File storage returns a url even when its base_url is unset or modified.
"""
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url('test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('..')
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
        # Monkey-patch os.makedirs to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path, mode=0o777, exist_ok=False):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path, mode, exist_ok)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path, mode, exist_ok)
if not exist_ok:
raise FileExistsError()
elif path == os.path.join(self.temp_dir, 'error'):
raise PermissionError()
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file', ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file', ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Exceptions aside from FileExistsError are raised.
with self.assertRaises(PermissionError):
self.storage.save('error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
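    # Illustrative sketch (hypothetical helper, not part of the test suite
    # above): the race fake_makedirs() simulates is another process creating
    # the directory between an existence check and makedirs(); exist_ok=True
    # turns the second call into a no-op instead of a FileExistsError.
    def _makedirs_exist_ok_demo(self):
        path = os.path.join(self.temp_dir, 'exist_ok_demo')
        os.makedirs(path)
        os.makedirs(path, exist_ok=True)  # would raise FileExistsError without exist_ok
        self.assertTrue(os.path.isdir(path))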
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
        # Monkey-patch os.remove to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise FileNotFoundError()
elif path == os.path.join(self.temp_dir, 'error.file'):
raise PermissionError()
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))
# Exceptions aside from FileNotFoundError are raised.
self.storage.save('error.file', ContentFile('delete with error'))
with self.assertRaises(PermissionError):
self.storage.delete('error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
        Test behavior when file.chunks() raises an error.
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise OSError
f1.chunks = failing_chunks
with self.assertRaises(OSError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
def test_delete_deletes_directories(self):
tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
self.storage.delete(tmp_dir)
self.assertFalse(os.path.exists(tmp_dir))
@override_settings(
MEDIA_ROOT='media_root',
MEDIA_URL='media_url/',
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
"""
        Properties using settings values as defaults should be updated when
        the referenced settings change, while explicitly specified values
        should remain unchanged.
"""
storage = self.storage_class(
location='explicit_location',
base_url='explicit_base_url/',
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
'MEDIA_ROOT': 'overridden_media_root',
'MEDIA_URL': 'overridden_media_url/',
'FILE_UPLOAD_PERMISSIONS': 0o333,
'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, 'explicit_location')
self.assertIn('explicit_location', storage.location)
self.assertEqual(storage.base_url, 'explicit_base_url/')
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
self.assertEqual(
defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
basename, *ext = name.split('.')
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
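# Illustrative sketch (hypothetical function, not part of the original
# module): how the loop in CustomStorage.get_available_name() composes the
# numbered candidate while keeping the extension in place.
def _custom_available_name_demo():
    basename, *ext = 'report.txt'.split('.')
    assert '.'.join([basename, '2'] + ext) == 'report.2.txt'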
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class OverwritingStorage(FileSystemStorage):
"""
Overwrite existing files instead of appending a suffix to generate an
unused name.
"""
    # Mask out O_EXCL so os.open() doesn't raise FileExistsError if the file exists.
OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL
    def get_available_name(self, name, max_length=None):
        """Override the effort to find an unused name."""
return name
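# Illustrative sketch (hypothetical function, not part of the original
# module): clearing O_EXCL is what makes the overwrite legal at the
# os.open() level; with the flag set, opening an existing path raises
# FileExistsError.
def _o_excl_demo(path):
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
    os.close(os.open(path, flags))  # creates the file
    try:
        os.open(path, flags)  # O_EXCL refuses to reuse an existing file
    except FileExistsError:
        pass
    os.close(os.open(path, flags & ~os.O_EXCL))  # reopens it without error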
class OverwritingStorageTests(FileStorageTests):
storage_class = OverwritingStorage
def test_save_overwrite_behavior(self):
"""Saving to same file name twice overwrites the first file."""
name = 'test.file'
self.assertFalse(self.storage.exists(name))
content_1 = b'content one'
content_2 = b'second content'
f_1 = ContentFile(content_1)
f_2 = ContentFile(content_2)
stored_name_1 = self.storage.save(name, f_1)
try:
self.assertEqual(stored_name_1, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_1)
stored_name_2 = self.storage.save(name, f_2)
self.assertEqual(stored_name_2, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_2)
finally:
self.storage.delete(name)
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super()._save(name, content)
return ''
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
"""
When Storage.save() wraps a file-like object in File, it should include
the name argument so that bool(file) evaluates to True (#26495).
"""
output = StringIO('content')
self.storage.save('tests/stringio', output)
self.assertTrue(self.storage.exists('tests/stringio'))
with self.storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
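# Illustrative sketch (hypothetical function, not part of the original
# module): a File's truthiness follows its name, so a nameless wrapper is
# falsy even when its buffer holds data; that is exactly the condition
# DiscardingFalseContentStorage._save() keys on.
def _file_truthiness_demo():
    assert bool(File(StringIO('content'), name='tests/stringio'))
    assert not bool(File(StringIO('content'), name=''))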
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
with obj.normal as normal:
normal.open('wb')
normal.write(b'updated')
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b'updated')
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                objs[1].limited_length.save(filename, ContentFile('Same Content'))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use a subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_pathlib_upload_to(self):
obj = Storage()
obj.pathlib_callable.save('some_file1.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_callable.name, 'bar/some_file1.txt')
obj.pathlib_direct.save('some_file2.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_direct.name, 'bar/some_file2.txt')
obj.random.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as Python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
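    # Illustrative sketch (hypothetical helper, not part of the test suite
    # above): the arithmetic behind the default-permission assertion, with
    # umask 0o027 stripping group-write and all world bits from 0o666.
    def _umask_arithmetic_demo(self):
        self.assertEqual(0o666 & ~0o027, 0o640)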
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
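    # Illustrative sketch (hypothetical helper, not part of the test suite
    # above): os.path.splitext() treats a lone leading dot as part of the
    # root rather than as an extension, which is why '.test' gets its random
    # suffix appended at the end.
    def _splitext_demo(self):
        self.assertEqual(os.path.splitext('.test'), ('.test', ''))
        self.assertEqual(os.path.splitext('file.txt'), ('file', '.txt'))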
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
        ContentFile can be saved correctly with the filesystem storage if it
        was initialized with either bytes or unicode content.
"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib_request_urlopen(self):
"""
Test the File storage API with a file-like object coming from
urllib.request.urlopen().
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
| 40.470705
| 113
| 0.65331
|
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
get_storage_class('storage.NonexistentStorage')
def test_get_nonexistent_storage_class(self):
with self.assertRaises(ImportError):
get_storage_class('django.core.files.storage.NonexistentStorage')
def test_get_nonexistent_storage_module(self):
with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, ())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists('test.file.tz.on'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.on', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists('test.file.tz.off'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.off', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file', ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
# should encode special chars except ~!*()'
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
        self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url('test.file')
        storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('..')
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('/etc/passwd')
def test_file_storage_preserves_filename_case(self):
other_temp_storage = self.storage_class(location=self.temp_dir2)
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
real_makedirs = os.makedirs
def fake_makedirs(path, mode=0o777, exist_ok=False):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path, mode, exist_ok)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path, mode, exist_ok)
if not exist_ok:
raise FileExistsError()
elif path == os.path.join(self.temp_dir, 'error'):
raise PermissionError()
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file', ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file', ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
with self.assertRaises(PermissionError):
self.storage.save('error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
real_remove = os.remove
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise FileNotFoundError()
elif path == os.path.join(self.temp_dir, 'error.file'):
raise PermissionError()
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))
self.storage.save('error.file', ContentFile('delete with error'))
with self.assertRaises(PermissionError):
self.storage.delete('error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
f1 = ContentFile('chunks fails')
def failing_chunks():
raise OSError
f1.chunks = failing_chunks
with self.assertRaises(OSError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
with self.assertRaises(AssertionError):
self.storage.delete('')
def test_delete_deletes_directories(self):
tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
self.storage.delete(tmp_dir)
self.assertFalse(os.path.exists(tmp_dir))
@override_settings(
MEDIA_ROOT='media_root',
MEDIA_URL='media_url/',
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
storage = self.storage_class(
location='explicit_location',
base_url='explicit_base_url/',
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
'MEDIA_ROOT': 'overridden_media_root',
'MEDIA_URL': 'overridden_media_url/',
'FILE_UPLOAD_PERMISSIONS': 0o333,
'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, 'explicit_location')
self.assertIn('explicit_location', storage.location)
self.assertEqual(storage.base_url, 'explicit_base_url/')
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
self.assertEqual(
defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
basename, *ext = name.split('.')
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class OverwritingStorage(FileSystemStorage):
OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL
def get_available_name(self, name, max_length=None):
return name
class OverwritingStorageTests(FileStorageTests):
storage_class = OverwritingStorage
def test_save_overwrite_behavior(self):
name = 'test.file'
self.assertFalse(self.storage.exists(name))
content_1 = b'content one'
content_2 = b'second content'
f_1 = ContentFile(content_1)
f_2 = ContentFile(content_2)
stored_name_1 = self.storage.save(name, f_1)
try:
self.assertEqual(stored_name_1, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_1)
stored_name_2 = self.storage.save(name, f_2)
self.assertEqual(stored_name_2, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_2)
finally:
self.storage.delete(name)
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super()._save(name, content)
return ''
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
output = StringIO('content')
self.storage.save('tests/stringio', output)
self.assertTrue(self.storage.exists('tests/stringio'))
with self.storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
with obj.normal as normal:
normal.open('wb')
normal.write(b'updated')
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b'updated')
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                objs[1].limited_length.save(filename, ContentFile('Same Content'))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
        # But it shouldn't be deleted, even if there are no more objects using
        # it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_pathlib_upload_to(self):
obj = Storage()
obj.pathlib_callable.save('some_file1.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_callable.name, 'bar/some_file1.txt')
obj.pathlib_direct.save('some_file2.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_direct.name, 'bar/some_file2.txt')
obj.random.close()
def test_random_upload_to(self):
        obj = Storage()
        obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
temp_storage.save('tests/example.txt', ContentFile('some content'))
with open(temp_storage.path('tests/example.txt')) as file_obj:
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
output = StringIO()
output.write('content')
output.seek(0)
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib_request_urlopen(self):
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
| true
| true
|
1c41768beb0f8028fb918ee5a723b4aee1287030
| 1,678
|
py
|
Python
|
source/tests/csvImplementation_test.py
|
staujd02/Pi-RFID-Video-Player
|
613d5a9355b660afb5414b3f4a9dad219b69fc36
|
[
"Apache-2.0"
] | 1
|
2020-02-15T15:21:03.000Z
|
2020-02-15T15:21:03.000Z
|
source/tests/csvImplementation_test.py
|
staujd02/Pi-RFID-Video-Player
|
613d5a9355b660afb5414b3f4a9dad219b69fc36
|
[
"Apache-2.0"
] | 8
|
2019-12-14T16:31:13.000Z
|
2021-05-22T23:06:35.000Z
|
source/tests/csvImplementation_test.py
|
staujd02/Pi-RFID-Video-Player
|
613d5a9355b660afb5414b3f4a9dad219b69fc36
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os
from source.informationManagers.dataStorageMethods.csvImplementation import CSVImplementation
from source.informationManagers.dataStorageMethods.database import Database
class CSVImplementation_test(unittest.TestCase):
TEST_DB = "TestDb.csv"
def test_csv_can_query_by_the_first_column(self):
self.assertEqual(self.db.query("2"), ["Donkey", "Dreary"])
def test_csv_can_query_by_the_first_column_and_get_the_last_item(self):
self.assertEqual(self.db.query("5"), ["Horse", "Champion"])
def test_csv_can_update_an_entry(self):
self.db.update("5", ["Horse", "Starlight"])
self.assertEqual(self.db.query("5"), ["Horse", "Starlight"])
def test_csv_can_save_modifications_to_data(self):
self.db.update("5", ["Horse", "Dusty"])
self.db.save(self.TEST_DB)
self.db.load(self.TEST_DB)
self.assertEqual(self.db.query("5"), ["Horse", "Dusty"])
self.assertEqual(self.db.query("2"), ["Donkey", "Dreary"])
def test_csv_can_be_iterated(self):
iterator = self.db.iterate()
self.assertEqual(["Monkey", "Melvin"], self.db.query(next(iterator)))
self.assertEqual(["Donkey", "Dreary"], self.db.query(next(iterator)))
self.assertEqual(["Horse", "Champion"], self.db.query(next(iterator)))
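    # Illustrative sketch (hypothetical helper, not part of the original
    # file): the backing store is key-first CSV, so a row like
    # "2,Donkey,Dreary" maps key "2" to the fields ["Donkey", "Dreary"]
    # that query() returns.
    def _csv_row_shape_demo(self):
        key, *fields = "2,Donkey,Dreary".split(",")
        self.assertEqual((key, fields), ("2", ["Donkey", "Dreary"]))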
def setUp(self):
self.createTestCSV()
self.db = CSVImplementation.openDB(Database, self.TEST_DB)
def createTestCSV(self):
        with open(self.TEST_DB, "w") as f:
            f.writelines(["1,Monkey,Melvin\n", "2,Donkey,Dreary\n", "5,Horse,Champion"])
def tearDown(self):
os.remove(self.TEST_DB)
| 38.136364
| 93
| 0.672229
|
import unittest
import os
from source.informationManagers.dataStorageMethods.csvImplementation import CSVImplementation
from source.informationManagers.dataStorageMethods.database import Database
class CSVImplementation_test(unittest.TestCase):
TEST_DB = "TestDb.csv"
def test_csv_can_query_by_the_first_column(self):
self.assertEqual(self.db.query("2"), ["Donkey", "Dreary"])
def test_csv_can_query_by_the_first_column_and_get_the_last_item(self):
self.assertEqual(self.db.query("5"), ["Horse", "Champion"])
def test_csv_can_update_an_entry(self):
self.db.update("5", ["Horse", "Starlight"])
self.assertEqual(self.db.query("5"), ["Horse", "Starlight"])
def test_csv_can_save_modifications_to_data(self):
self.db.update("5", ["Horse", "Dusty"])
self.db.save(self.TEST_DB)
self.db.load(self.TEST_DB)
self.assertEqual(self.db.query("5"), ["Horse", "Dusty"])
self.assertEqual(self.db.query("2"), ["Donkey", "Dreary"])
def test_csv_can_be_iterated(self):
iterator = self.db.iterate()
self.assertEqual(["Monkey", "Melvin"], self.db.query(next(iterator)))
self.assertEqual(["Donkey", "Dreary"], self.db.query(next(iterator)))
self.assertEqual(["Horse", "Champion"], self.db.query(next(iterator)))
def setUp(self):
self.createTestCSV()
self.db = CSVImplementation.openDB(Database, self.TEST_DB)
def createTestCSV(self):
        with open(self.TEST_DB, "w") as f:
            f.writelines(["1,Monkey,Melvin\n", "2,Donkey,Dreary\n", "5,Horse,Champion"])
def tearDown(self):
os.remove(self.TEST_DB)
| true
| true
|
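For readers unfamiliar with the interface these tests exercise, a minimal sketch of a CSV-backed store satisfying the query/update/save/load/iterate contract might look like the following. This is a hypothetical stand-in, not the project's actual CSVImplementation; the openDB classmethod signature and the unused Database argument are assumptions taken from how the tests call it:

import csv

class MinimalCsvStore:
    """Hypothetical sketch of the interface the tests above exercise."""
    def __init__(self):
        self._rows = {}  # key (first CSV column) -> remaining columns

    @classmethod
    def openDB(cls, _database_cls, path):
        store = cls()
        store.load(path)
        return store

    def load(self, path):
        with open(path, newline="") as f:
            self._rows = {row[0]: row[1:] for row in csv.reader(f)}

    def save(self, path):
        with open(path, "w", newline="") as f:
            writer = csv.writer(f)
            for key, rest in self._rows.items():
                writer.writerow([key] + rest)

    def query(self, key):
        return self._rows[key]

    def update(self, key, values):
        self._rows[key] = list(values)

    def iterate(self):
        # dicts preserve insertion order (Python 3.7+), so keys come back
        # in file order, matching test_csv_can_be_iterated
        return iter(self._rows)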
1c4177b9378b12a3aa1d7c8f56dd135d26e15a9d
| 3,889
|
py
|
Python
|
prediction/src/algorithms/classify/src/classification_model.py
|
yasiriqbal1/concept-to-clinic-1
|
3b7d34a6b31e8d3924934f3e5c990c49813c670e
|
[
"MIT"
] | 346
|
2017-08-04T12:26:11.000Z
|
2018-10-16T06:51:45.000Z
|
prediction/src/algorithms/classify/src/classification_model.py
|
yasiriqbal1/concept-to-clinic-1
|
3b7d34a6b31e8d3924934f3e5c990c49813c670e
|
[
"MIT"
] | 296
|
2017-08-02T10:17:05.000Z
|
2018-07-31T05:29:43.000Z
|
prediction/src/algorithms/classify/src/classification_model.py
|
yasiriqbal1/concept-to-clinic-1
|
3b7d34a6b31e8d3924934f3e5c990c49813c670e
|
[
"MIT"
] | 159
|
2017-08-04T07:34:52.000Z
|
2018-10-16T18:34:08.000Z
|
import abc
import collections
class ClassificationModel(abc.ABC):
"""
Args:
init_model (bool | str): whether to initialise the model.
If str, then the model will be loaded from the init_model
path.
        pull_size (int): maximum number of batches allowed to be stored in RAM.
"""
def __init__(self, init_model=True, pull_size=10, batch_size=32, data_format=None):
self.model = None
if not isinstance(batch_size, int):
raise ValueError('`batch_size` should be of type int')
if batch_size < 1:
            raise ValueError('`batch_size` should be greater than or equal to 1')
self.batch_size = batch_size
if not isinstance(pull_size, int):
raise ValueError('`pull_size` should be of type int')
if pull_size < 1:
            raise ValueError('`pull_size` should be greater than or equal to 1')
self.pull_ct = collections.deque(maxlen=pull_size)
self.pull_patches = []
self.data_format = data_format
self.set_params()
if init_model:
if isinstance(init_model, str):
self.model = self.load_model(init_model)
else:
self.model = self.init_model()
@abc.abstractmethod
def init_model(self):
pass
@abc.abstractmethod
def load_model(self, model_path):
"""
Load model method.
Args:
model_path (str): A path to the model.
Returns:
Model
"""
pass
@abc.abstractmethod
def _ct_preprocess(self, ct_path):
pass
@abc.abstractmethod
def _batch_process(self, batch, labels):
pass
@abc.abstractmethod
def feed(self, annotations, sampling_pure, sampling_cancerous):
"""
Train the model through the annotated CT scans
Args:
annotations (list[dict]): A list of centroids of the form::
{'file_path': str,
'centroids': [{'x': int,
'y': int,
'z': int,
'cancerous': bool}, ..]}.
            sampling_pure (float): sampling coefficient for non-cancerous
                (pure) patches.
            sampling_cancerous (float): sampling coefficient for cancerous
                patches.
Yields:
list[np.nd array]: list of patches.
"""
pass
@abc.abstractmethod
def train(self, annotations, train_val_split):
"""
Train the model through the annotated CT scans
Args:
annotations (list[dict]): A list of centroids of the form::
{'file_path': str,
'centroids': [{'x': int,
'y': int,
'z': int,
'cancerous': bool}, ..]}.
Returns:
tf.model: a model trained over annotated data.
"""
pass
@abc.abstractmethod
def predict(self, candidates, model_path=None):
"""
Predict cancerous of given candidates.
Args:
candidates (list[dict]): A list of centroids of the form::
{'file_path': str,
'centroids': {'x': int,
'y': int,
'z': int}}
model_path (str): A path to the serialized model
Returns:
(list[dict]): A list of centroids of the form::
{'file_path': str,
'centroids': {'x': int,
'y': int,
'z': int,
'p_concerning': float}}
"""
pass
@abc.abstractmethod
def clear(self):
"""
Clear all signs of backend models.
"""
pass
| 29.687023
| 87
| 0.4901
|
import abc
import collections
class ClassificationModel(abc.ABC):
def __init__(self, init_model=True, pull_size=10, batch_size=32, data_format=None):
self.model = None
if not isinstance(batch_size, int):
raise ValueError('`batch_size` should be of type int')
if batch_size < 1:
            raise ValueError('`batch_size` should be greater than or equal to 1')
self.batch_size = batch_size
if not isinstance(pull_size, int):
raise ValueError('`pull_size` should be of type int')
if pull_size < 1:
raise ValueError('`pull_size` should be grater or equal to 1')
self.pull_ct = collections.deque(maxlen=pull_size)
self.pull_patches = []
self.data_format = data_format
self.set_params()
if init_model:
if isinstance(init_model, str):
self.model = self.load_model(init_model)
else:
self.model = self.init_model()
@abc.abstractmethod
def init_model(self):
pass
@abc.abstractmethod
def load_model(self, model_path):
pass
@abc.abstractmethod
def _ct_preprocess(self, ct_path):
pass
@abc.abstractmethod
def _batch_process(self, batch, labels):
pass
@abc.abstractmethod
def feed(self, annotations, sampling_pure, sampling_cancerous):
pass
@abc.abstractmethod
def train(self, annotations, train_val_split):
pass
@abc.abstractmethod
def predict(self, candidates, model_path=None):
pass
@abc.abstractmethod
def clear(self):
pass
| true
| true
|
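As a rough illustration of how the abstract contract above could be satisfied, here is a minimal no-op subclass. It is purely hypothetical: the real project wires these methods to an actual backend, and set_params (called by the base __init__ but not shown in the ABC) is assumed to be a subclass hook:

class DummyClassifier(ClassificationModel):
    """Hypothetical no-op subclass showing the required overrides."""
    def set_params(self):  # assumed hook invoked by the base __init__
        pass

    def init_model(self):
        return object()  # stand-in for a real backend model

    def load_model(self, model_path):
        return object()

    def _ct_preprocess(self, ct_path):
        return []

    def _batch_process(self, batch, labels):
        return batch, labels

    def feed(self, annotations, sampling_pure, sampling_cancerous):
        yield []  # the docstring says feed yields lists of patches

    def train(self, annotations, train_val_split):
        return self.model

    def predict(self, candidates, model_path=None):
        # echo candidates back with a dummy concern probability
        for candidate in candidates:
            candidate['centroids']['p_concerning'] = 0.0
        return candidates

    def clear(self):
        self.model = None

# clf = DummyClassifier(init_model=True, pull_size=2, batch_size=8)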
1c4177cb554a2f15962cc028608078160d73c8e5
| 622
|
py
|
Python
|
tests/test_fixtures.py
|
adammichaelwood/omk_core
|
9f3a845aeadad0b1de91d7f20da3ae6b686a07d0
|
[
"MIT"
] | null | null | null |
tests/test_fixtures.py
|
adammichaelwood/omk_core
|
9f3a845aeadad0b1de91d7f20da3ae6b686a07d0
|
[
"MIT"
] | 3
|
2019-01-28T16:50:27.000Z
|
2019-02-20T01:55:19.000Z
|
tests/test_fixtures.py
|
adammichaelwood/omk_core
|
9f3a845aeadad0b1de91d7f20da3ae6b686a07d0
|
[
"MIT"
] | 1
|
2018-06-04T10:32:05.000Z
|
2018-06-04T10:32:05.000Z
|
import pytest
import omk_core as omk
@pytest.fixture
def tonal_tuples():
MS = [
(0, 0),
(1, 2),
(2, 4),
(3, 5),
(4, 7),
(5, 9),
(6,11)
]
return [(x[0],(x[1]+m)%12) for m in [0,1,2,-1,-2] for x in MS]
@pytest.fixture
def tonal_vectors(tonal_tuples):
return [omk.TonalVector(x) for x in tonal_tuples]
@pytest.fixture
def tonal_oct_tuples(tonal_tuples):
return [(x[0], x[1], y) for y in [0,1,2,-1,-2] for x in tonal_tuples]
@pytest.fixture
def tonal_oct_vectors(tonal_oct_tuples):
return [omk.TonalVector(x) for x in tonal_oct_tuples]
| 20.064516
| 73
| 0.585209
|
import pytest
import omk_core as omk
@pytest.fixture
def tonal_tuples():
MS = [
(0, 0),
(1, 2),
(2, 4),
(3, 5),
(4, 7),
(5, 9),
(6,11)
]
return [(x[0],(x[1]+m)%12) for m in [0,1,2,-1,-2] for x in MS]
@pytest.fixture
def tonal_vectors(tonal_tuples):
return [omk.TonalVector(x) for x in tonal_tuples]
@pytest.fixture
def tonal_oct_tuples(tonal_tuples):
return [(x[0], x[1], y) for y in [0,1,2,-1,-2] for x in tonal_tuples]
@pytest.fixture
def tonal_oct_vectors(tonal_oct_tuples):
return [omk.TonalVector(x) for x in tonal_oct_tuples]
| true
| true
|
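To see what the tonal_tuples comprehension actually produces, a short standalone check (no pytest fixtures needed) can be run:

MS = [(0, 0), (1, 2), (2, 4), (3, 5), (4, 7), (5, 9), (6, 11)]
tuples = [(x[0], (x[1] + m) % 12) for m in [0, 1, 2, -1, -2] for x in MS]

# m = 0 reproduces the base scale mapping unchanged:
assert tuples[:7] == MS
# m = 1 shifts every chromatic value up a semitone, wrapping mod 12:
assert tuples[7] == (0, 1) and tuples[13] == (6, 0)
assert len(tuples) == 35  # 7 scale degrees x 5 chromatic offsets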
1c417959169a956a504ba3f6f0e6131c46d10cbf
| 3,304
|
py
|
Python
|
athena/transform/feats/pitch_test.py
|
wgfi110/athena
|
e704884ec6a3a947769d892aa267578038e49ecb
|
[
"Apache-2.0"
] | 791
|
2019-12-22T03:09:04.000Z
|
2022-03-26T01:57:42.000Z
|
athena/transform/feats/pitch_test.py
|
wgfi110/athena
|
e704884ec6a3a947769d892aa267578038e49ecb
|
[
"Apache-2.0"
] | 198
|
2019-12-22T03:06:27.000Z
|
2022-03-29T02:57:59.000Z
|
athena/transform/feats/pitch_test.py
|
wgfi110/athena
|
e704884ec6a3a947769d892aa267578038e49ecb
|
[
"Apache-2.0"
] | 194
|
2019-12-24T03:59:29.000Z
|
2022-03-25T02:44:51.000Z
|
# Copyright (C) ATHENA AUTHORS
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The model tests pitch FE."""
import os
from pathlib import Path
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from athena.transform.feats.read_wav import ReadWav
from athena.transform.feats.pitch import Pitch
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class SpectrumTest(tf.test.TestCase):
"""Pitch extraction test."""
def test_spectrum(self):
"""Test Pitch using 16kHz && 8kHz wav."""
wav_path_16k = str(
Path(os.environ["MAIN_ROOT"]).joinpath("examples/sm1_cln.wav")
)
wav_path_8k = str(
Path(os.environ["MAIN_ROOT"]).joinpath("examples/english.wav")
)
with self.session():
for wav_file in [wav_path_16k]:
read_wav = ReadWav.params().instantiate()
input_data, sample_rate = read_wav(wav_file)
pitch = Pitch.params(
{"window_length": 0.025, "soft_min_f0": 10.0}
).instantiate()
pitch_test = pitch(input_data, sample_rate)
if tf.executing_eagerly():
self.assertEqual(tf.rank(pitch_test).numpy(), 2)
else:
self.assertEqual(tf.rank(pitch_test).eval(), 2)
output_true = [
[-0.1366025, 143.8855],
[-0.0226383, 143.8855],
[-0.08464742, 143.8855],
[-0.08458386, 143.8855],
[-0.1208689, 143.8855],
]
if wav_file == wav_path_16k:
if tf.executing_eagerly():
print("Transform: ", pitch_test.numpy()[0:5, :])
print("kaldi:", output_true)
self.assertAllClose(
pitch_test.numpy()[0:5, :],
output_true,
rtol=1e-05,
atol=1e-05,
)
else:
print("Transform: ", pitch_test.eval())
print("kaldi:", output_true)
self.assertAllClose(
pitch_test.eval()[0:5, :],
output_true,
rtol=1e-05,
atol=1e-05,
)
if __name__ == "__main__":
is_eager = True
if not is_eager:
disable_eager_execution()
else:
if tf.__version__ < "2.0.0":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| 35.913043
| 80
| 0.51816
|
import os
from pathlib import Path
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from athena.transform.feats.read_wav import ReadWav
from athena.transform.feats.pitch import Pitch
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class SpectrumTest(tf.test.TestCase):
def test_spectrum(self):
wav_path_16k = str(
Path(os.environ["MAIN_ROOT"]).joinpath("examples/sm1_cln.wav")
)
wav_path_8k = str(
Path(os.environ["MAIN_ROOT"]).joinpath("examples/english.wav")
)
with self.session():
for wav_file in [wav_path_16k]:
read_wav = ReadWav.params().instantiate()
input_data, sample_rate = read_wav(wav_file)
pitch = Pitch.params(
{"window_length": 0.025, "soft_min_f0": 10.0}
).instantiate()
pitch_test = pitch(input_data, sample_rate)
if tf.executing_eagerly():
self.assertEqual(tf.rank(pitch_test).numpy(), 2)
else:
self.assertEqual(tf.rank(pitch_test).eval(), 2)
output_true = [
[-0.1366025, 143.8855],
[-0.0226383, 143.8855],
[-0.08464742, 143.8855],
[-0.08458386, 143.8855],
[-0.1208689, 143.8855],
]
if wav_file == wav_path_16k:
if tf.executing_eagerly():
print("Transform: ", pitch_test.numpy()[0:5, :])
print("kaldi:", output_true)
self.assertAllClose(
pitch_test.numpy()[0:5, :],
output_true,
rtol=1e-05,
atol=1e-05,
)
else:
print("Transform: ", pitch_test.eval())
print("kaldi:", output_true)
self.assertAllClose(
pitch_test.eval()[0:5, :],
output_true,
rtol=1e-05,
atol=1e-05,
)
if __name__ == "__main__":
is_eager = True
if not is_eager:
disable_eager_execution()
else:
if tf.__version__ < "2.0.0":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| true
| true
|
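The eager/graph branching used in the assertions above is a general TF pattern: the same tensor must be read with .numpy() in eager mode but .eval() under a graph session. A minimal helper distilling it (a sketch, not part of athena; .eval() still requires a default session, e.g. the self.session() context the test already uses):

import tensorflow as tf

def concrete_value(t):
    # In eager mode tensors carry their values directly; in graph mode
    # they must be evaluated against the default session.
    return t.numpy() if tf.executing_eagerly() else t.eval()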
1c4179c2af86b60e626c745a4b3a99ddb5f6562c
| 12,362
|
py
|
Python
|
tests/test_functional_config.py
|
smilers/sabnzbd
|
ed0e5bbf9b963113f4962e7aec297266f19d6615
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | 1,354
|
2015-01-02T11:53:23.000Z
|
2022-03-30T01:32:46.000Z
|
tests/test_functional_config.py
|
smilers/sabnzbd
|
ed0e5bbf9b963113f4962e7aec297266f19d6615
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | 1,742
|
2015-01-01T20:17:35.000Z
|
2022-03-30T21:05:00.000Z
|
tests/test_functional_config.py
|
smilers/sabnzbd
|
ed0e5bbf9b963113f4962e7aec297266f19d6615
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | 410
|
2015-01-24T05:32:46.000Z
|
2022-03-20T12:56:49.000Z
|
#!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
tests.test_functional_config - Basic testing if Config pages work
"""
from selenium.common.exceptions import NoSuchElementException, UnexpectedAlertPresentException, NoAlertPresentException
from pytest_httpserver import HTTPServer
from tests.testhelper import *
class TestBasicPages(SABnzbdBaseTest):
def test_base_pages(self):
# Quick-check of all Config pages
test_urls = ["config", "config/server", "config/categories", "config/scheduling", "config/rss"]
for test_url in test_urls:
self.open_page("http://%s:%s/%s" % (SAB_HOST, SAB_PORT, test_url))
def test_base_submit_pages(self):
test_urls_with_submit = [
"config/general",
"config/folders",
"config/switches",
"config/sorting",
"config/notify",
"config/special",
]
for test_url in test_urls_with_submit:
self.open_page("http://%s:%s/%s" % (SAB_HOST, SAB_PORT, test_url))
# Can only click the visible buttons
submit_btns = self.selenium_wrapper(self.driver.find_elements_by_class_name, "saveButton")
for submit_btn in submit_btns:
if submit_btn.is_displayed():
break
else:
raise NoSuchElementException
# Click the right button
submit_btn.click()
try:
self.wait_for_ajax()
except UnexpectedAlertPresentException:
try:
# Ignore restart-request due to empty sabnzbd.ini in tests
self.selenium_wrapper(self.driver.switch_to.alert.dismiss)
except NoAlertPresentException:
pass
# For Specials page we get redirected after save, so check for no crash
if "special" in test_url:
self.no_page_crash()
else:
                # For the others, if all went well the button returns to normal within a second
time.sleep(1.5)
assert submit_btn.text == "Save Changes"
class TestConfigLogin(SABnzbdBaseTest):
def test_login(self):
# Test if base page works
self.open_page("http://%s:%s/sabnzbd/config/general" % (SAB_HOST, SAB_PORT))
# Set the username and password
username_imp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='username']")
username_imp.clear()
username_imp.send_keys("test_username")
pass_inp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='password']")
pass_inp.clear()
pass_inp.send_keys("test_password")
# Submit and ignore alert
self.selenium_wrapper(self.driver.find_element_by_class_name, "saveButton").click()
try:
self.wait_for_ajax()
except UnexpectedAlertPresentException:
try:
# Ignore restart-request
self.driver.switch_to.alert.dismiss()
except NoAlertPresentException:
pass
# Open any page and check if we get redirected
self.open_page("http://%s:%s/sabnzbd/general" % (SAB_HOST, SAB_PORT))
assert "/login/" in self.driver.current_url
# Fill nonsense and submit
username_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='username']")
username_login.clear()
username_login.send_keys("nonsense")
pass_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='password']")
pass_login.clear()
pass_login.send_keys("nonsense")
self.driver.find_element_by_tag_name("button").click()
# Check if we were denied
assert (
"Authentication failed"
in self.selenium_wrapper(self.driver.find_element_by_class_name, "alert-danger").text
)
# Fill right stuff
username_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='username']")
username_login.clear()
username_login.send_keys("test_username")
pass_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='password']")
pass_login.clear()
pass_login.send_keys("test_password")
self.driver.find_element_by_tag_name("button").click()
# Can we now go to the page and empty the settings again?
self.open_page("http://%s:%s/sabnzbd/config/general" % (SAB_HOST, SAB_PORT))
assert "/login/" not in self.driver.current_url
# Set the username and password
username_imp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='username']")
username_imp.clear()
pass_inp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='password']")
pass_inp.clear()
# Submit and ignore alert
self.selenium_wrapper(self.driver.find_element_by_class_name, "saveButton").click()
try:
self.wait_for_ajax()
except UnexpectedAlertPresentException:
try:
# Ignore restart-request
self.driver.switch_to.alert.dismiss()
except NoAlertPresentException:
pass
# Open any page and check if we get redirected
self.open_page("http://%s:%s/sabnzbd/general" % (SAB_HOST, SAB_PORT))
assert "/login/" not in self.driver.current_url
class TestConfigCategories(SABnzbdBaseTest):
category_name = "testCat"
def test_page(self):
# Test if base page works
self.open_page("http://%s:%s/sabnzbd/config/categories" % (SAB_HOST, SAB_PORT))
# Add new category
self.driver.find_elements_by_name("newname")[1].send_keys("testCat")
self.selenium_wrapper(
self.driver.find_element_by_xpath, "//button/text()[normalize-space(.)='Add']/parent::*"
).click()
self.no_page_crash()
assert self.category_name not in self.driver.page_source
class TestConfigRSS(SABnzbdBaseTest):
rss_name = "_SeleniumFeed"
def test_rss_basic_flow(self, httpserver: HTTPServer):
# Setup the response for the NZB
nzb_data = create_and_read_nzb("basic_rar5")
httpserver.expect_request("/test_nzb.nzb").respond_with_data(nzb_data)
nzb_url = httpserver.url_for("/test_nzb.nzb")
# Set the response for the RSS-feed, replacing the URL to the NZB
with open(os.path.join(SAB_DATA_DIR, "rss_feed_test.xml")) as rss_file:
rss_data = rss_file.read()
rss_data = rss_data.replace("NZB_URL", nzb_url)
httpserver.expect_request("/rss_feed.xml").respond_with_data(rss_data)
rss_url = httpserver.url_for("/rss_feed.xml")
# Test if base page works
self.open_page("http://%s:%s/sabnzbd/config/rss" % (SAB_HOST, SAB_PORT))
# Uncheck enabled-checkbox for new feeds
self.selenium_wrapper(
self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//input[@name="enable"]'
).click()
input_name = self.selenium_wrapper(
self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//input[@name="feed"]'
)
input_name.clear()
input_name.send_keys(self.rss_name)
self.selenium_wrapper(
self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//input[@name="uri"]'
).send_keys(rss_url)
self.selenium_wrapper(self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//button').click()
# Check if we have results
tab_results = int(
self.selenium_wrapper(self.driver.find_element_by_xpath, '//a[@href="#rss-tab-matched"]/span').text
)
assert tab_results > 0
# Check if it matches the number of rows
tab_table_results = len(self.driver.find_elements_by_xpath('//div[@id="rss-tab-matched"]/table/tbody/tr'))
assert tab_table_results == tab_results
        # Pause the queue so we don't download stuff
assert get_api_result("pause") == {"status": True}
# Download something
download_btn = self.selenium_wrapper(
self.driver.find_element_by_xpath, '//div[@id="rss-tab-matched"]/table/tbody//button'
)
download_btn.click()
self.wait_for_ajax()
# Does the page think it's a success?
assert "Added NZB" in download_btn.text
# Wait 2 seconds for the fetch
time.sleep(2)
# Let's check the queue
for _ in range(10):
queue_result_slots = get_api_result("queue")["queue"]["slots"]
# Check if the fetch-request was added to the queue
if queue_result_slots:
break
time.sleep(1)
else:
# The loop never stopped, so we fail
pytest.fail("Did not find the RSS job in the queue")
return
# Let's remove this thing
get_api_result("queue", extra_arguments={"name": "delete", "value": "all"})
assert len(get_api_result("queue")["queue"]["slots"]) == 0
# Unpause
assert get_api_result("resume") == {"status": True}
class TestConfigServers(SABnzbdBaseTest):
server_name = "_SeleniumServer"
def open_config_servers(self):
# Test if base page works
self.open_page("http://%s:%s/sabnzbd/config/server" % (SAB_HOST, SAB_PORT))
self.scroll_to_top()
# Show advanced options
advanced_btn = self.selenium_wrapper(self.driver.find_element_by_name, "advanced-settings-button")
if not advanced_btn.get_attribute("checked"):
advanced_btn.click()
def add_test_server(self):
# Add server
self.selenium_wrapper(self.driver.find_element_by_id, "addServerButton").click()
host_inp = self.selenium_wrapper(self.driver.find_element_by_name, "host")
host_inp.clear()
host_inp.send_keys(SAB_NEWSSERVER_HOST)
# Change port
port_inp = self.selenium_wrapper(self.driver.find_element_by_name, "port")
port_inp.clear()
port_inp.send_keys(SAB_NEWSSERVER_PORT)
# Test server-check
self.selenium_wrapper(self.driver.find_element_by_css_selector, "#addServerContent .testServer").click()
self.wait_for_ajax()
check_result = self.selenium_wrapper(
self.driver.find_element_by_css_selector, "#addServerContent .result-box"
).text
assert "Connection Successful" in check_result
# Set test-servername
self.selenium_wrapper(self.driver.find_element_by_id, "displayname").send_keys(self.server_name)
# Add and show details
port_inp.send_keys(Keys.RETURN)
time.sleep(1)
if not self.selenium_wrapper(self.driver.find_element_by_id, "host0").is_displayed():
self.selenium_wrapper(self.driver.find_element_by_class_name, "showserver").click()
def remove_server(self):
# Remove the first server and accept the confirmation
self.selenium_wrapper(self.driver.find_element_by_class_name, "delServer").click()
self.driver.switch_to.alert.accept()
# Check that it's gone
time.sleep(2)
assert self.server_name not in self.driver.page_source
def test_add_and_remove_server(self):
self.open_config_servers()
self.add_test_server()
self.remove_server()
| 39.621795
| 119
| 0.653697
|
from selenium.common.exceptions import NoSuchElementException, UnexpectedAlertPresentException, NoAlertPresentException
from pytest_httpserver import HTTPServer
from tests.testhelper import *
class TestBasicPages(SABnzbdBaseTest):
def test_base_pages(self):
test_urls = ["config", "config/server", "config/categories", "config/scheduling", "config/rss"]
for test_url in test_urls:
self.open_page("http://%s:%s/%s" % (SAB_HOST, SAB_PORT, test_url))
def test_base_submit_pages(self):
test_urls_with_submit = [
"config/general",
"config/folders",
"config/switches",
"config/sorting",
"config/notify",
"config/special",
]
for test_url in test_urls_with_submit:
self.open_page("http://%s:%s/%s" % (SAB_HOST, SAB_PORT, test_url))
submit_btns = self.selenium_wrapper(self.driver.find_elements_by_class_name, "saveButton")
for submit_btn in submit_btns:
if submit_btn.is_displayed():
break
else:
raise NoSuchElementException
submit_btn.click()
try:
self.wait_for_ajax()
except UnexpectedAlertPresentException:
try:
self.selenium_wrapper(self.driver.switch_to.alert.dismiss)
except NoAlertPresentException:
pass
if "special" in test_url:
self.no_page_crash()
else:
time.sleep(1.5)
assert submit_btn.text == "Save Changes"
class TestConfigLogin(SABnzbdBaseTest):
def test_login(self):
self.open_page("http://%s:%s/sabnzbd/config/general" % (SAB_HOST, SAB_PORT))
username_imp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='username']")
username_imp.clear()
username_imp.send_keys("test_username")
pass_inp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='password']")
pass_inp.clear()
pass_inp.send_keys("test_password")
self.selenium_wrapper(self.driver.find_element_by_class_name, "saveButton").click()
try:
self.wait_for_ajax()
except UnexpectedAlertPresentException:
try:
self.driver.switch_to.alert.dismiss()
except NoAlertPresentException:
pass
self.open_page("http://%s:%s/sabnzbd/general" % (SAB_HOST, SAB_PORT))
assert "/login/" in self.driver.current_url
username_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='username']")
username_login.clear()
username_login.send_keys("nonsense")
pass_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='password']")
pass_login.clear()
pass_login.send_keys("nonsense")
self.driver.find_element_by_tag_name("button").click()
assert (
"Authentication failed"
in self.selenium_wrapper(self.driver.find_element_by_class_name, "alert-danger").text
)
username_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='username']")
username_login.clear()
username_login.send_keys("test_username")
pass_login = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[name='password']")
pass_login.clear()
pass_login.send_keys("test_password")
self.driver.find_element_by_tag_name("button").click()
self.open_page("http://%s:%s/sabnzbd/config/general" % (SAB_HOST, SAB_PORT))
assert "/login/" not in self.driver.current_url
username_imp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='username']")
username_imp.clear()
pass_inp = self.selenium_wrapper(self.driver.find_element_by_css_selector, "input[data-hide='password']")
pass_inp.clear()
self.selenium_wrapper(self.driver.find_element_by_class_name, "saveButton").click()
try:
self.wait_for_ajax()
except UnexpectedAlertPresentException:
try:
self.driver.switch_to.alert.dismiss()
except NoAlertPresentException:
pass
self.open_page("http://%s:%s/sabnzbd/general" % (SAB_HOST, SAB_PORT))
assert "/login/" not in self.driver.current_url
class TestConfigCategories(SABnzbdBaseTest):
category_name = "testCat"
def test_page(self):
self.open_page("http://%s:%s/sabnzbd/config/categories" % (SAB_HOST, SAB_PORT))
self.driver.find_elements_by_name("newname")[1].send_keys("testCat")
self.selenium_wrapper(
self.driver.find_element_by_xpath, "//button/text()[normalize-space(.)='Add']/parent::*"
).click()
self.no_page_crash()
assert self.category_name not in self.driver.page_source
class TestConfigRSS(SABnzbdBaseTest):
rss_name = "_SeleniumFeed"
def test_rss_basic_flow(self, httpserver: HTTPServer):
nzb_data = create_and_read_nzb("basic_rar5")
httpserver.expect_request("/test_nzb.nzb").respond_with_data(nzb_data)
nzb_url = httpserver.url_for("/test_nzb.nzb")
with open(os.path.join(SAB_DATA_DIR, "rss_feed_test.xml")) as rss_file:
rss_data = rss_file.read()
rss_data = rss_data.replace("NZB_URL", nzb_url)
httpserver.expect_request("/rss_feed.xml").respond_with_data(rss_data)
rss_url = httpserver.url_for("/rss_feed.xml")
self.open_page("http://%s:%s/sabnzbd/config/rss" % (SAB_HOST, SAB_PORT))
self.selenium_wrapper(
self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//input[@name="enable"]'
).click()
input_name = self.selenium_wrapper(
self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//input[@name="feed"]'
)
input_name.clear()
input_name.send_keys(self.rss_name)
self.selenium_wrapper(
self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//input[@name="uri"]'
).send_keys(rss_url)
self.selenium_wrapper(self.driver.find_element_by_xpath, '//form[@action="add_rss_feed"]//button').click()
tab_results = int(
self.selenium_wrapper(self.driver.find_element_by_xpath, '//a[@href="#rss-tab-matched"]/span').text
)
assert tab_results > 0
tab_table_results = len(self.driver.find_elements_by_xpath('//div[@id="rss-tab-matched"]/table/tbody/tr'))
assert tab_table_results == tab_results
assert get_api_result("pause") == {"status": True}
# Download something
download_btn = self.selenium_wrapper(
self.driver.find_element_by_xpath, '//div[@id="rss-tab-matched"]/table/tbody//button'
)
download_btn.click()
self.wait_for_ajax()
# Does the page think it's a success?
assert "Added NZB" in download_btn.text
time.sleep(2)
for _ in range(10):
queue_result_slots = get_api_result("queue")["queue"]["slots"]
# Check if the fetch-request was added to the queue
if queue_result_slots:
break
time.sleep(1)
else:
# The loop never stopped, so we fail
pytest.fail("Did not find the RSS job in the queue")
return
# Let's remove this thing
get_api_result("queue", extra_arguments={"name": "delete", "value": "all"})
assert len(get_api_result("queue")["queue"]["slots"]) == 0
assert get_api_result("resume") == {"status": True}
class TestConfigServers(SABnzbdBaseTest):
server_name = "_SeleniumServer"
def open_config_servers(self):
self.open_page("http://%s:%s/sabnzbd/config/server" % (SAB_HOST, SAB_PORT))
self.scroll_to_top()
advanced_btn = self.selenium_wrapper(self.driver.find_element_by_name, "advanced-settings-button")
if not advanced_btn.get_attribute("checked"):
advanced_btn.click()
def add_test_server(self):
self.selenium_wrapper(self.driver.find_element_by_id, "addServerButton").click()
host_inp = self.selenium_wrapper(self.driver.find_element_by_name, "host")
host_inp.clear()
host_inp.send_keys(SAB_NEWSSERVER_HOST)
port_inp = self.selenium_wrapper(self.driver.find_element_by_name, "port")
port_inp.clear()
port_inp.send_keys(SAB_NEWSSERVER_PORT)
self.selenium_wrapper(self.driver.find_element_by_css_selector, "#addServerContent .testServer").click()
self.wait_for_ajax()
check_result = self.selenium_wrapper(
self.driver.find_element_by_css_selector, "#addServerContent .result-box"
).text
assert "Connection Successful" in check_result
self.selenium_wrapper(self.driver.find_element_by_id, "displayname").send_keys(self.server_name)
port_inp.send_keys(Keys.RETURN)
time.sleep(1)
if not self.selenium_wrapper(self.driver.find_element_by_id, "host0").is_displayed():
self.selenium_wrapper(self.driver.find_element_by_class_name, "showserver").click()
def remove_server(self):
self.selenium_wrapper(self.driver.find_element_by_class_name, "delServer").click()
self.driver.switch_to.alert.accept()
time.sleep(2)
assert self.server_name not in self.driver.page_source
def test_add_and_remove_server(self):
self.open_config_servers()
self.add_test_server()
self.remove_server()
| true
| true
|
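The queue-polling loop in test_rss_basic_flow above leans on Python's for/else: the else branch runs only when the loop exhausts without hitting break, which turns a polling timeout into a test failure cleanly. Distilled into a hypothetical helper (not SABnzbd code):

import time
import pytest

def wait_until(condition, attempts=10, delay=1.0):
    # Poll until condition() is truthy; fail the test on timeout.
    for _ in range(attempts):
        if condition():
            break
        time.sleep(delay)
    else:
        # only reached when the loop exhausted without a break
        pytest.fail("condition never became true")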
1c417b1be04154897dcdf094d3d71149d36c36f0
| 773
|
py
|
Python
|
turret/layers/shuffle.py
|
hemantranvir/turret
|
bc3df21541ce2f808c749c985db47a210149f22c
|
[
"MIT"
] | 4
|
2019-03-14T18:27:33.000Z
|
2021-07-05T05:34:30.000Z
|
turret/layers/shuffle.py
|
hemantranvir/turret
|
bc3df21541ce2f808c749c985db47a210149f22c
|
[
"MIT"
] | 1
|
2019-06-07T06:03:04.000Z
|
2019-06-07T06:03:04.000Z
|
turret/layers/shuffle.py
|
hemantranvir/turret
|
bc3df21541ce2f808c749c985db47a210149f22c
|
[
"MIT"
] | 4
|
2019-10-30T10:30:47.000Z
|
2019-10-30T11:15:40.000Z
|
# -*- coding: utf-8 -*-
from .builtin import shuffle_and_reshape
from ..foundational import Dimension
from ..foundational import Dimensions
def shuffle(input, order):
"""Layer to shuffle.
Args:
input(turret.Tensor): The tensor which will be processed by layer.
order(tuple): The permutation applied by the first transpose
operation.
Returns:
tensor(turret.Tensor): Tensor processed by layer.
"""
# IShuffleLayer shuffles also the types of dimensions.
# turret.shuffle keeps the types of dimensions.
in_dims = input.dimensions
out_dims = Dimensions([
Dimension(in_dims[j].size, in_dims[i].type)
for i, j in enumerate(order)])
return shuffle_and_reshape(input, order, out_dims, None)
| 32.208333
| 74
| 0.68564
|
from .builtin import shuffle_and_reshape
from ..foundational import Dimension
from ..foundational import Dimensions
def shuffle(input, order):
in_dims = input.dimensions
out_dims = Dimensions([
Dimension(in_dims[j].size, in_dims[i].type)
for i, j in enumerate(order)])
return shuffle_and_reshape(input, order, out_dims, None)
| true
| true
|
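The key line in shuffle above is the Dimensions comprehension: output position i takes its size from input position order[i] but keeps the type already sitting at position i. With plain (size, type) tuples standing in for turret's Dimension objects (an illustration of the indexing only, not the turret API):

in_dims = [(3, "channel"), (4, "height"), (5, "width")]  # (size, type) stand-ins
order = (2, 0, 1)

out_dims = [(in_dims[j][0], in_dims[i][1]) for i, j in enumerate(order)]
# sizes are permuted, types stay in place:
assert out_dims == [(5, "channel"), (3, "height"), (4, "width")]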
1c417b30e5340b842772007aa29eace4c6887c3a
| 6,782
|
py
|
Python
|
src/models/LCALSTM_after.py
|
jacksonlli/learn-hippo
|
7695d22e73c334b6d9df7e35cb6e30855db187fe
|
[
"MIT"
] | 13
|
2020-12-06T02:31:02.000Z
|
2022-02-16T16:20:31.000Z
|
src/models/LCALSTM_after.py
|
jacksonlli/learn-hippo
|
7695d22e73c334b6d9df7e35cb6e30855db187fe
|
[
"MIT"
] | 1
|
2021-09-19T20:55:32.000Z
|
2021-09-19T20:55:32.000Z
|
src/models/LCALSTM_after.py
|
jacksonlli/learn-hippo
|
7695d22e73c334b6d9df7e35cb6e30855db187fe
|
[
"MIT"
] | 3
|
2020-12-24T00:52:43.000Z
|
2021-12-15T02:12:15.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from models.EM import EM
from torch.distributions import Categorical
from models.initializer import initialize_weights
# constants
# number of vector signal (lstm gates)
N_VSIG = 3
# number of scalar signal (sigma)
N_SSIG = 1
# the ordering in the cache
scalar_signal_names = ['input strength']
vector_signal_names = ['f', 'i', 'o']
sigmoid = nn.Sigmoid()
class LCALSTM_after(nn.Module):
def __init__(
self, input_dim, output_dim, rnn_hidden_dim, dec_hidden_dim,
kernel='cosine', dict_len=2, weight_init_scheme='ortho',
cmpt=.8, em_gate=.3, add_penalty_dim=True
):
super(LCALSTM_after, self).__init__()
self.cmpt = cmpt
self.em_gate = em_gate
if add_penalty_dim:
self.input_dim = input_dim + 1
else:
self.input_dim = input_dim
# self.input_dim = input_dim + 1
self.rnn_hidden_dim = rnn_hidden_dim
self.n_hidden_total = (N_VSIG + 1) * rnn_hidden_dim + N_SSIG
# rnn module
self.i2h = nn.Linear(self.input_dim, self.n_hidden_total)
self.h2h = nn.Linear(rnn_hidden_dim, self.n_hidden_total)
        # decision module
self.ih = nn.Linear(rnn_hidden_dim, dec_hidden_dim)
self.actor = nn.Linear(dec_hidden_dim, output_dim)
self.critic = nn.Linear(dec_hidden_dim, 1)
# memory
self.hpc = nn.Linear(
rnn_hidden_dim + rnn_hidden_dim + dec_hidden_dim, N_SSIG
)
self.em = EM(dict_len, rnn_hidden_dim, kernel)
# the RL mechanism
self.weight_init_scheme = weight_init_scheme
self.init_model()
def init_model(self):
# add name fields
self.n_ssig = N_SSIG
self.n_vsig = N_VSIG
self.vsig_names = vector_signal_names
self.ssig_names = scalar_signal_names
# init params
initialize_weights(self, self.weight_init_scheme)
def get_init_states(self, scale=.1, device='cpu'):
h_0_ = sample_random_vector(self.rnn_hidden_dim, scale)
c_0_ = sample_random_vector(self.rnn_hidden_dim, scale)
return (h_0_, c_0_)
def forward(self, x_t, hc_prev, beta=1):
# unpack activity
(h_prev, c_prev) = hc_prev
h_prev = h_prev.view(h_prev.size(1), -1)
c_prev = c_prev.view(c_prev.size(1), -1)
x_t = x_t.view(x_t.size(1), -1)
# transform the input info
preact = self.i2h(x_t) + self.h2h(h_prev)
# get all gate values
gates = preact[:, : N_VSIG * self.rnn_hidden_dim].sigmoid()
c_t_new = preact[:, N_VSIG * self.rnn_hidden_dim + N_SSIG:].tanh()
# split input(write) gate, forget gate, output(read) gate
f_t = gates[:, :self.rnn_hidden_dim]
o_t = gates[:, self.rnn_hidden_dim:2 * self.rnn_hidden_dim]
i_t = gates[:, -self.rnn_hidden_dim:]
# new cell state = gated(prev_c) + gated(new_stuff)
c_t = torch.mul(c_prev, f_t) + torch.mul(i_t, c_t_new)
# make 1st decision attempt
h_t = torch.mul(o_t, c_t.tanh())
dec_act_t = F.relu(self.ih(h_t))
# recall / encode
# hpc_input_t = torch.cat([c_t, dec_act_t], dim=1)
# inps_t = sigmoid(self.hpc(hpc_input_t))
# [inps_t, comp_t] = torch.squeeze(phi_t)
m_t = self.recall(c_t, self.em_gate)
hpc_input_t = torch.cat([m_t, c_t, dec_act_t], dim=1)
em_g_t = sigmoid(self.hpc(hpc_input_t))
cm_t = c_t + m_t * em_g_t
self.encode(cm_t)
# make final dec
h_t = torch.mul(o_t, cm_t.tanh())
dec_act_t = F.relu(self.ih(h_t))
pi_a_t = _softmax(self.actor(dec_act_t), beta)
value_t = self.critic(dec_act_t)
# reshape data
h_t = h_t.view(1, h_t.size(0), -1)
cm_t = cm_t.view(1, cm_t.size(0), -1)
        # cache results
scalar_signal = [em_g_t, 0, 0]
vector_signal = [f_t, i_t, o_t]
misc = [h_t, m_t, cm_t, dec_act_t, self.em.get_vals()]
cache = [vector_signal, scalar_signal, misc]
return pi_a_t, value_t, (h_t, cm_t), cache
def recall(self, c_t, inps_t, comp_t=None):
"""run the "pattern completion" procedure
        Parameters
        ----------
        c_t : torch.tensor, vector
            cell state
        inps_t : torch.tensor, scalar
            LCA param, input strength / feedforward weights
        comp_t : torch.tensor, scalar, optional
            LCA param, lateral inhibition; defaults to self.cmpt
        Returns
        -------
        tensor
            the recalled memory item (zeros when retrieval is off)
"""
if comp_t is None:
comp_t = self.cmpt
if self.em.retrieval_off:
m_t = torch.zeros_like(c_t)
else:
# retrieve memory
m_t = self.em.get_memory(c_t, leak=0, comp=comp_t, w_input=inps_t)
return m_t
def encode(self, cm_t):
if not self.em.encoding_off:
self.em.save_memory(cm_t)
def pick_action(self, action_distribution):
"""action selection by sampling from a multinomial.
Parameters
----------
action_distribution : 1d torch.tensor
action distribution, pi(a|s)
Returns
-------
torch.tensor(int), torch.tensor(float)
sampled action, log_prob(sampled action)
"""
m = Categorical(action_distribution)
a_t = m.sample()
log_prob_a_t = m.log_prob(a_t)
return a_t, log_prob_a_t
def init_em_config(self):
self.flush_episodic_memory()
self.encoding_off()
self.retrieval_off()
def flush_episodic_memory(self):
self.em.flush()
def encoding_off(self):
self.em.encoding_off = True
def retrieval_off(self):
self.em.retrieval_off = True
def encoding_on(self):
self.em.encoding_off = False
def retrieval_on(self):
self.em.retrieval_off = False
def sample_random_vector(n_dim, scale=.1):
return torch.randn(1, 1, n_dim) * scale
def _softmax(z, beta):
"""helper function, softmax with beta
Parameters
----------
z : torch tensor, has 1d underlying structure after torch.squeeze
the raw logits
beta : float, >0
softmax temp, big value -> more "randomness"
Returns
-------
1d torch tensor
a probability distribution | beta
"""
assert beta > 0
# softmax the input to a valid PMF
pi_a = F.softmax(torch.squeeze(z / beta), dim=0)
# make sure the output is valid
if torch.any(torch.isnan(pi_a)):
raise ValueError(f'Softmax produced nan: {z} -> {pi_a}')
return pi_a
| 31.990566
| 78
| 0.607785
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from models.EM import EM
from torch.distributions import Categorical
from models.initializer import initialize_weights
N_VSIG = 3
N_SSIG = 1
scalar_signal_names = ['input strength']
vector_signal_names = ['f', 'i', 'o']
sigmoid = nn.Sigmoid()
class LCALSTM_after(nn.Module):
def __init__(
self, input_dim, output_dim, rnn_hidden_dim, dec_hidden_dim,
kernel='cosine', dict_len=2, weight_init_scheme='ortho',
cmpt=.8, em_gate=.3, add_penalty_dim=True
):
super(LCALSTM_after, self).__init__()
self.cmpt = cmpt
self.em_gate = em_gate
if add_penalty_dim:
self.input_dim = input_dim + 1
else:
self.input_dim = input_dim
self.rnn_hidden_dim = rnn_hidden_dim
self.n_hidden_total = (N_VSIG + 1) * rnn_hidden_dim + N_SSIG
self.i2h = nn.Linear(self.input_dim, self.n_hidden_total)
self.h2h = nn.Linear(rnn_hidden_dim, self.n_hidden_total)
self.ih = nn.Linear(rnn_hidden_dim, dec_hidden_dim)
self.actor = nn.Linear(dec_hidden_dim, output_dim)
self.critic = nn.Linear(dec_hidden_dim, 1)
self.hpc = nn.Linear(
rnn_hidden_dim + rnn_hidden_dim + dec_hidden_dim, N_SSIG
)
self.em = EM(dict_len, rnn_hidden_dim, kernel)
self.weight_init_scheme = weight_init_scheme
self.init_model()
def init_model(self):
self.n_ssig = N_SSIG
self.n_vsig = N_VSIG
self.vsig_names = vector_signal_names
self.ssig_names = scalar_signal_names
initialize_weights(self, self.weight_init_scheme)
def get_init_states(self, scale=.1, device='cpu'):
h_0_ = sample_random_vector(self.rnn_hidden_dim, scale)
c_0_ = sample_random_vector(self.rnn_hidden_dim, scale)
return (h_0_, c_0_)
def forward(self, x_t, hc_prev, beta=1):
(h_prev, c_prev) = hc_prev
h_prev = h_prev.view(h_prev.size(1), -1)
c_prev = c_prev.view(c_prev.size(1), -1)
x_t = x_t.view(x_t.size(1), -1)
preact = self.i2h(x_t) + self.h2h(h_prev)
gates = preact[:, : N_VSIG * self.rnn_hidden_dim].sigmoid()
c_t_new = preact[:, N_VSIG * self.rnn_hidden_dim + N_SSIG:].tanh()
f_t = gates[:, :self.rnn_hidden_dim]
o_t = gates[:, self.rnn_hidden_dim:2 * self.rnn_hidden_dim]
i_t = gates[:, -self.rnn_hidden_dim:]
c_t = torch.mul(c_prev, f_t) + torch.mul(i_t, c_t_new)
h_t = torch.mul(o_t, c_t.tanh())
dec_act_t = F.relu(self.ih(h_t))
m_t = self.recall(c_t, self.em_gate)
hpc_input_t = torch.cat([m_t, c_t, dec_act_t], dim=1)
em_g_t = sigmoid(self.hpc(hpc_input_t))
cm_t = c_t + m_t * em_g_t
self.encode(cm_t)
h_t = torch.mul(o_t, cm_t.tanh())
dec_act_t = F.relu(self.ih(h_t))
pi_a_t = _softmax(self.actor(dec_act_t), beta)
value_t = self.critic(dec_act_t)
h_t = h_t.view(1, h_t.size(0), -1)
cm_t = cm_t.view(1, cm_t.size(0), -1)
scalar_signal = [em_g_t, 0, 0]
vector_signal = [f_t, i_t, o_t]
misc = [h_t, m_t, cm_t, dec_act_t, self.em.get_vals()]
cache = [vector_signal, scalar_signal, misc]
return pi_a_t, value_t, (h_t, cm_t), cache
def recall(self, c_t, inps_t, comp_t=None):
if comp_t is None:
comp_t = self.cmpt
if self.em.retrieval_off:
m_t = torch.zeros_like(c_t)
else:
m_t = self.em.get_memory(c_t, leak=0, comp=comp_t, w_input=inps_t)
return m_t
def encode(self, cm_t):
if not self.em.encoding_off:
self.em.save_memory(cm_t)
def pick_action(self, action_distribution):
m = Categorical(action_distribution)
a_t = m.sample()
log_prob_a_t = m.log_prob(a_t)
return a_t, log_prob_a_t
def init_em_config(self):
self.flush_episodic_memory()
self.encoding_off()
self.retrieval_off()
def flush_episodic_memory(self):
self.em.flush()
def encoding_off(self):
self.em.encoding_off = True
def retrieval_off(self):
self.em.retrieval_off = True
def encoding_on(self):
self.em.encoding_off = False
def retrieval_on(self):
self.em.retrieval_off = False
def sample_random_vector(n_dim, scale=.1):
return torch.randn(1, 1, n_dim) * scale
def _softmax(z, beta):
assert beta > 0
pi_a = F.softmax(torch.squeeze(z / beta), dim=0)
if torch.any(torch.isnan(pi_a)):
raise ValueError(f'Softmax produced nan: {z} -> {pi_a}')
return pi_a
| true
| true
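The beta argument of _softmax above acts as a temperature: dividing the logits by a large beta flattens the distribution, exactly as the docstring's "big value -> more randomness" promises. A quick standalone check of that behaviour:

import torch
import torch.nn.functional as F

z = torch.tensor([2.0, 1.0, 0.0])
sharp = F.softmax(z / 0.5, dim=0)   # low temperature -> near one-hot
flat = F.softmax(z / 10.0, dim=0)   # high temperature -> near uniform
assert sharp.max() > flat.max()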
|
1c417d3507e5fd27f874af4f2ff4b442ad88aad4
| 1,170
|
py
|
Python
|
cs28_project/cs28/tests/test_award_override.py
|
desuderata/cs28_project
|
d8e84e2ee30f36ce964370580730396b8c211315
|
[
"MIT"
] | null | null | null |
cs28_project/cs28/tests/test_award_override.py
|
desuderata/cs28_project
|
d8e84e2ee30f36ce964370580730396b8c211315
|
[
"MIT"
] | null | null | null |
cs28_project/cs28/tests/test_award_override.py
|
desuderata/cs28_project
|
d8e84e2ee30f36ce964370580730396b8c211315
|
[
"MIT"
] | 1
|
2021-04-29T15:40:57.000Z
|
2021-04-29T15:40:57.000Z
|
""" Tests Award Override
- Tests if award is successfully overridden
author: Yee Hou, Teoh (2471020t)
"""
import json
from django.test import TestCase
from .test_setup import login, populate
from cs28.models import Student, AcademicPlan, GraduationYear, Grade
class AwardOverrideTest(TestCase):
def setUp(self):
login(self)
populate(self)
def test_updated_award(self):
"""
Tests if award is successfully overridden
"""
self.client.post("/cs28/manage/calculate/",
{'year': '19-20',
'plan': 'F100-2208'})
students = Student.objects.get(matricNo="2456789")
data = {"field": "award",
"row": json.dumps({"field": "award",
"id": "2456789",
"award": "Fail",
"oAward": "A1"})}
self.client.post("/cs28/manage/update/", data)
students = Student.objects.get(matricNo="2456789")
new_award = students.updatedAward
self.assertNotEquals("A1", new_award)
self.assertEquals(new_award, "Fail")
| 28.536585
| 68
| 0.553846
|
import json
from django.test import TestCase
from .test_setup import login, populate
from cs28.models import Student, AcademicPlan, GraduationYear, Grade
class AwardOverrideTest(TestCase):
def setUp(self):
login(self)
populate(self)
def test_updated_award(self):
self.client.post("/cs28/manage/calculate/",
{'year': '19-20',
'plan': 'F100-2208'})
students = Student.objects.get(matricNo="2456789")
data = {"field": "award",
"row": json.dumps({"field": "award",
"id": "2456789",
"award": "Fail",
"oAward": "A1"})}
self.client.post("/cs28/manage/update/", data)
students = Student.objects.get(matricNo="2456789")
new_award = students.updatedAward
self.assertNotEquals("A1", new_award)
self.assertEquals(new_award, "Fail")
| true
| true
|
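The update POST above smuggles the edited row as a JSON string inside ordinary form data, so the receiving view is one json.loads away from the fields. A sketch of that decoding step (hypothetical; the repo's actual view may structure this differently):

import json

def parse_update_payload(post_data):
    # post_data mimics request.POST: 'row' holds a JSON-encoded dict
    row = json.loads(post_data["row"])
    return row["id"], row["field"], row[row["field"]]

# parse_update_payload({"field": "award",
#                       "row": '{"field": "award", "id": "2456789", "award": "Fail", "oAward": "A1"}'})
# -> ('2456789', 'award', 'Fail')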
1c417ed357be01c7c65be0b3db6473fd9b7e9af1
| 1,042
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
lohithn4/recipe-app-api
|
5174dd4ac0fdb3f853d5f6071e8ea3871e54ba44
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
lohithn4/recipe-app-api
|
5174dd4ac0fdb3f853d5f6071e8ea3871e54ba44
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
lohithn4/recipe-app-api
|
5174dd4ac0fdb3f853d5f6071e8ea3871e54ba44
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-03-13 05:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.931034
| 118
| 0.603647
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
1c41806ee11d36933267fb4b998421647b5d9827
| 13,696
|
py
|
Python
|
loggers/serializers.py
|
drguggiana/bonDjango
|
820df735f60dbe198144235b45de514272edba20
|
[
"MIT"
] | null | null | null |
loggers/serializers.py
|
drguggiana/bonDjango
|
820df735f60dbe198144235b45de514272edba20
|
[
"MIT"
] | null | null | null |
loggers/serializers.py
|
drguggiana/bonDjango
|
820df735f60dbe198144235b45de514272edba20
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import *
from django.contrib.auth.models import User, Group
# define the common extra kwargs
common_extra_kwargs = {'mouse': {'lookup_field': 'mouse_name'}}
# define a function to put the url first and then sort all the other fields
def sort_fields(fields):
if 'mouse' in fields:
fields.remove('mouse')
sorted_fields = (['url', 'mouse'] + sorted(fields))
else:
sorted_fields = (['url'] + sorted(fields))
return sorted_fields
# mouse serializer (the serializers below follow the same pattern)
class MouseSerializer(serializers.HyperlinkedModelSerializer):
# define the fields associated with the model
# owner is special since it has to be read only
owner = serializers.ReadOnlyField(source='owner.username')
# the rest are all hyperlinked so people can navigate in the API online
    # each field declares whether the serializer handles many instances, the name of
    # the target view, and whether the field is read only by default
window = serializers.HyperlinkedRelatedField(many=True, view_name='window-detail', read_only=True,
lookup_field='slug')
surgery = serializers.HyperlinkedRelatedField(many=True, view_name='surgery-detail', read_only=True,
lookup_field='slug')
two_photon = serializers.HyperlinkedRelatedField(many=True, view_name='twophoton-detail', read_only=True)
intrinsic_imaging = serializers.HyperlinkedRelatedField(many=True,
view_name='intrinsicimaging-detail', read_only=True)
vr_experiment = serializers.HyperlinkedRelatedField(many=True, view_name='vrexperiment-detail', read_only=True,
lookup_field='slug')
video_experiment = serializers.HyperlinkedRelatedField(many=True, view_name='videoexperiment-detail', read_only=True
, lookup_field='slug')
score_sheet = serializers.HyperlinkedRelatedField(many=True, view_name='scoresheet-detail', read_only=True,
lookup_field='slug')
immuno_stain = serializers.HyperlinkedRelatedField(many=True, view_name='immunostain-detail', read_only=True)
restriction = serializers.HyperlinkedRelatedField(many=True, view_name='restriction-detail', read_only=True,
lookup_field='slug')
strain_name = serializers.ReadOnlyField()
# django specific Meta class
class Meta:
# define the model the serializer belongs to
model = Mouse
# define the search fields as anything that's not a relation
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'url': {'lookup_field': 'mouse_name'}, 'restriction': {'lookup_field': 'slug'},
'window': {'lookup_field': 'slug'}}
class ProfileSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Profile
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'user': {'lookup_field': 'username'}}
class UserSerializer(serializers.HyperlinkedModelSerializer):
main_path = serializers.ReadOnlyField(source='profile.main_path',)
class Meta:
model = User
fields = ('url', 'id', 'username', 'mouse', 'groups', 'main_path')
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'username'}
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name', 'user_set')
extra_kwargs = {'user_set': {'lookup_field': 'username'}}
class WindowSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Window
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
class SurgerySerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Surgery
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
class RestrictionTypeSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = RestrictionType
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug_restrictionType')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug_restrictionType'}
extra_kwargs['restriction'] = {'lookup_field': 'slug'}
class RestrictionSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
# start_date = serializers.ReadOnlyField()
end_date = serializers.ReadOnlyField()
class Meta:
model = Restriction
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
extra_kwargs['restriction_type'] = {'lookup_field': 'slug_restrictionType'}
class VideoExperimentSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
# preproc_files = serializers.HyperlinkedRelatedField(
# view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
# mouse = serializers.HyperlinkedRelatedField(
# view_name='mouse-detail', read_only=True, lookup_field='mouse_name')
class Meta:
model = VideoExperiment
fields = ([f.name for f in model._meta.get_fields()])
# fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
extra_kwargs['preproc_files'] = {'lookup_field': 'slug'}
class TwoPhotonSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = TwoPhoton
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
# extra_kwargs['preproc_files'] = {'lookup_field': 'slug'}
class IntrinsicImagingSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = IntrinsicImaging
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
# extra_kwargs['preproc_files'] = {'lookup_field': 'slug'}
class VRExperimentSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = VRExperiment
fields = ([f.name for f in model._meta.get_fields()])
# fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
# extra_kwargs['preproc_files'] = {'lookup_field': 'slug'}
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Project
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'members': {'lookup_field': 'username'}}
class LicenseSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = License
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'members': {'lookup_field': 'username'}}
class StrainSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Strain
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
class ScoreSheetSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = ScoreSheet
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
class ImmunoStainSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = ImmunoStain
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
class MouseSetSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = MouseSet
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['restrictiontype'] = {'lookup_field': 'slug_restrictionType'}
class ExperimentTypeSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = ExperimentType
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'users': {'lookup_field': 'username'},
'vrexperiment_type': {'lookup_field': 'slug'},
'videoexperiment_type': {'lookup_field': 'slug'},
'surgery_type': {'lookup_field': 'slug'}}
class AnalyzedDataSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = AnalyzedData
fields = ([f.name for f in model._meta.get_fields()])
# fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = {'url': {'lookup_field': 'slug'},
'vr_analysis': {'lookup_field': 'slug'},
'video_analysis': {'lookup_field': 'slug'},
'figure_analysis': {'lookup_field': 'slug'}
}
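# Hedged annotation (not part of the original file): each lookup_field entry
# in the extra_kwargs above tells DRF to build and resolve these hyperlinks by
# the related object's slug instead of its primary key; this presumably
# mirrors a matching lookup_field on the corresponding ViewSets.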
class FigureSerializer(serializers.HyperlinkedModelSerializer):
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = Figure
fields = ([f.name for f in model._meta.get_fields()])
# fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = {'url': {'lookup_field': 'slug'},
}
class GeneralSerializer(serializers.ModelSerializer):
class Meta:
model = None
fields = None
extra_kwargs = None
class PythonSerializer(serializers.ModelSerializer):
class Meta:
model = None
fields = None
extra_kwargs = None
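# Hedged sketch (annotation, not part of the original file): GeneralSerializer
# and PythonSerializer above look like placeholders whose Meta attributes are
# meant to be filled in at runtime. One plausible pattern, using a hypothetical
# helper name for illustration only:
# def serializer_for(target_model):
#     GeneralSerializer.Meta.model = target_model
#     GeneralSerializer.Meta.fields = '__all__'
#     return GeneralSerializer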
# obtained from https://www.django-rest-framework.org/api-guide/serializers/#example
# class DynamicFieldsModelSerializer(serializers.ModelSerializer):
# """
# A ModelSerializer that takes an additional `fields` argument that
# controls which fields should be displayed.
# """
#
# def __init__(self, *args, **kwargs):
#
# fields = kwargs.pop('fields', None)
# # Instantiate the superclass normally
# super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
#
# # fields = self.context['request'].query_params.get('fields')
# if fields is not None:
# # fields = fields.split(',')
# # Drop any fields that are not specified in the `fields` argument.
# allowed = set(fields)
# existing = set(self.fields.keys())
# for field_name in existing - allowed:
# self.fields.pop(field_name)
# Include this in the target serializer, Groups in this case
# users = serializers.SerializerMethodField()
# def get_users(self, obj):
# users_list = User.objects.filter(groups__name=obj.name)
# users_serial = UserSerializer(users_list, many=True, context={'request': self.context['request']}
# , fields=['url', 'username'])
# return users_serial.data
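# Hedged illustration (not part of the original file): if both commented-out
# patterns above were enabled, a Group would serialize its members through
# get_users with only the requested fields, yielding a payload roughly like:
# {"url": "...", "name": "...", "users": [{"url": "...", "username": "..."}]}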
| 38.044444
| 120
| 0.656104
|
from rest_framework import serializers
from .models import *
from django.contrib.auth.models import User, Group
common_extra_kwargs = {'mouse': {'lookup_field': 'mouse_name'}}
def sort_fields(fields):
if 'mouse' in fields:
fields.remove('mouse')
sorted_fields = (['url', 'mouse'] + sorted(fields))
else:
sorted_fields = (['url'] + sorted(fields))
return sorted_fields
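# Hedged annotation (not part of the original file): sort_fields pins 'url'
# (and 'mouse', when present) to the front of the field list and sorts the
# remaining names alphabetically, e.g.
# sort_fields(['b', 'mouse', 'a']) -> ['url', 'mouse', 'a', 'b']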
class MouseSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
window = serializers.HyperlinkedRelatedField(many=True, view_name='window-detail', read_only=True,
lookup_field='slug')
surgery = serializers.HyperlinkedRelatedField(many=True, view_name='surgery-detail', read_only=True,
lookup_field='slug')
two_photon = serializers.HyperlinkedRelatedField(many=True, view_name='twophoton-detail', read_only=True)
intrinsic_imaging = serializers.HyperlinkedRelatedField(many=True,
view_name='intrinsicimaging-detail', read_only=True)
vr_experiment = serializers.HyperlinkedRelatedField(many=True, view_name='vrexperiment-detail', read_only=True,
lookup_field='slug')
video_experiment = serializers.HyperlinkedRelatedField(many=True, view_name='videoexperiment-detail', read_only=True,
lookup_field='slug')
score_sheet = serializers.HyperlinkedRelatedField(many=True, view_name='scoresheet-detail', read_only=True,
lookup_field='slug')
immuno_stain = serializers.HyperlinkedRelatedField(many=True, view_name='immunostain-detail', read_only=True)
restriction = serializers.HyperlinkedRelatedField(many=True, view_name='restriction-detail', read_only=True,
lookup_field='slug')
strain_name = serializers.ReadOnlyField()
# Django-specific Meta class
class Meta:
# define the model the serializer belongs to
model = Mouse
# gather the names of all model fields (get_fields() includes relations)
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'url': {'lookup_field': 'mouse_name'}, 'restriction': {'lookup_field': 'slug'},
'window': {'lookup_field': 'slug'}}
class ProfileSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Profile
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'user': {'lookup_field': 'username'}}
class UserSerializer(serializers.HyperlinkedModelSerializer):
main_path = serializers.ReadOnlyField(source='profile.main_path',)
class Meta:
model = User
fields = ('url', 'id', 'username', 'mouse', 'groups', 'main_path')
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'username'}
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name', 'user_set')
extra_kwargs = {'user_set': {'lookup_field': 'username'}}
class WindowSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Window
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
class SurgerySerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Surgery
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
class RestrictionTypeSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = RestrictionType
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug_restrictionType')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug_restrictionType'}
extra_kwargs['restriction'] = {'lookup_field': 'slug'}
class RestrictionSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
end_date = serializers.ReadOnlyField()
class Meta:
model = Restriction
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
extra_kwargs['restriction_type'] = {'lookup_field': 'slug_restrictionType'}
class VideoExperimentSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = VideoExperiment
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
extra_kwargs['preproc_files'] = {'lookup_field': 'slug'}
class TwoPhotonSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = TwoPhoton
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
class IntrinsicImagingSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = IntrinsicImaging
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
class VRExperimentSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = VRExperiment
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Project
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'members': {'lookup_field': 'username'}}
class LicenseSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = License
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'members': {'lookup_field': 'username'}}
class StrainSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Strain
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
class ScoreSheetSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = ScoreSheet
fields = ([f.name for f in model._meta.get_fields()])
fields.remove('slug')
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['url'] = {'lookup_field': 'slug'}
class ImmunoStainSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = ImmunoStain
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
class MouseSetSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = MouseSet
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = common_extra_kwargs.copy()
extra_kwargs['restrictiontype'] = {'lookup_field': 'slug_restrictionType'}
class ExperimentTypeSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = ExperimentType
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'users': {'lookup_field': 'username'},
'vrexperiment_type': {'lookup_field': 'slug'},
'videoexperiment_type': {'lookup_field': 'slug'},
'surgery_type': {'lookup_field': 'slug'}}
class AnalyzedDataSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = AnalyzedData
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'url': {'lookup_field': 'slug'},
'vr_analysis': {'lookup_field': 'slug'},
'video_analysis': {'lookup_field': 'slug'},
'figure_analysis': {'lookup_field': 'slug'}
}
class FigureSerializer(serializers.HyperlinkedModelSerializer):
preproc_files = serializers.HyperlinkedRelatedField(
view_name='analyzeddata-detail', many=True, read_only=True, lookup_field='slug')
class Meta:
model = Figure
fields = ([f.name for f in model._meta.get_fields()])
fields = sort_fields(fields)
extra_kwargs = {'url': {'lookup_field': 'slug'},
}
class GeneralSerializer(serializers.ModelSerializer):
class Meta:
model = None
fields = None
extra_kwargs = None
class PythonSerializer(serializers.ModelSerializer):
class Meta:
model = None
fields = None
extra_kwargs = None
| true
| true
|