The block below is a flattened dataset preview: a column schema followed by sample rows. Schema:

| Column | Dtype | Range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (nullable) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (nullable) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 to 10.2M |

Each record below lists these 28 fields in order, separated by `|`, with the raw file `content` as the last field.
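Assuming these rows come from a dataset hosted on the Hugging Face Hub with the schema above, a minimal sketch for streaming and filtering records with the `datasets` library could look like this (the dataset id is a placeholder, not taken from this page):

```python
# Minimal sketch, assuming a Hub-hosted dataset with the schema above;
# "org/dataset-name" is a placeholder id, not taken from this page.
from datasets import load_dataset

# Stream rather than download: individual `content` cells can be ~10 MB.
rows = load_dataset("org/dataset-name", split="train", streaming=True)

for row in rows.take(100):  # IterableDataset.take limits how much is fetched
    # Keep small, human-written Python files under a permissive license.
    if (
        row["language"] == "Python"
        and row["license_type"] == "permissive"
        and not row["is_generated"]
        and not row["is_vendor"]
        and row["length_bytes"] < 10_000
    ):
        print(row["repo_name"], row["path"], row["length_bytes"])
```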
958d387bfdf88a4823aecddff915b3399bd6d534
|
1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2
|
/stubs/googleapiclient/channel.pyi
|
da9374c9c074bf862fa3bd9901d531bc2adf7ce1
|
[
"MIT"
] |
permissive
|
the-blue-alliance/the-blue-alliance
|
3dc210a9611ce9b240907ffd420f78040318dcdc
|
6d42f3cdb2f785d192f2871419e58aaae3445029
|
refs/heads/py3
| 2023-08-22T21:02:36.398100
| 2023-08-22T19:14:01
| 2023-08-22T19:14:01
| 888,427
| 344
| 263
|
MIT
| 2023-09-14T18:35:20
| 2010-09-04T20:34:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,125
|
pyi
|
channel.pyi
|
from googleapiclient import errors as errors
from typing import Any, Optional
EPOCH: Any
CHANNEL_PARAMS: Any
X_GOOG_CHANNEL_ID: str
X_GOOG_MESSAGE_NUMBER: str
X_GOOG_RESOURCE_STATE: str
X_GOOG_RESOURCE_URI: str
X_GOOG_RESOURCE_ID: str
class Notification:
message_number: Any = ...
state: Any = ...
resource_uri: Any = ...
resource_id: Any = ...
def __init__(self, message_number: Any, state: Any, resource_uri: Any, resource_id: Any) -> None: ...
class Channel:
type: Any = ...
id: Any = ...
token: Any = ...
address: Any = ...
expiration: Any = ...
params: Any = ...
resource_id: Any = ...
resource_uri: Any = ...
def __init__(self, type: Any, id: Any, token: Any, address: Any, expiration: Optional[Any] = ..., params: Optional[Any] = ..., resource_id: str = ..., resource_uri: str = ...) -> None: ...
def body(self): ...
def update(self, resp: Any) -> None: ...
def notification_from_headers(channel: Any, headers: Any): ...
def new_webhook_channel(url: Any, token: Optional[Any] = ..., expiration: Optional[Any] = ..., params: Optional[Any] = ...): ...
|
63a2becf81feda9ad4391c3164e36ebd6f909fb1
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/prb_control/entities/base/pre_queue/ctx.py
|
d9900e2f814eed7f7392792a9f7e2f5c59a38fcc
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
ctx.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/prb_control/entities/base/pre_queue/ctx.py
from gui.prb_control.entities.base.ctx import PrbCtrlRequestCtx
from gui.prb_control.settings import CTRL_ENTITY_TYPE, REQUEST_TYPE
from gui.prb_control.settings import FUNCTIONAL_FLAG
from gui.shared.utils.decorators import ReprInjector
class _PreQueueRequestCtx(PrbCtrlRequestCtx):
def __init__(self, **kwargs):
super(_PreQueueRequestCtx, self).__init__(ctrlType=CTRL_ENTITY_TYPE.PREQUEUE, **kwargs)
class QueueCtx(_PreQueueRequestCtx):
def getRequestType(self):
return REQUEST_TYPE.QUEUE
class DequeueCtx(_PreQueueRequestCtx):
def getRequestType(self):
return REQUEST_TYPE.DEQUEUE
class JoinPreQueueModeCtx(_PreQueueRequestCtx):
def __init__(self, queueType, flags=FUNCTIONAL_FLAG.UNDEFINED, waitingID=''):
super(JoinPreQueueModeCtx, self).__init__(entityType=queueType, flags=flags, waitingID=waitingID)
def getID(self):
pass
@ReprInjector.withParent(('getWaitingID', 'waitingID'))
class LeavePreQueueCtx(_PreQueueRequestCtx):
def getRequestType(self):
return REQUEST_TYPE.LEAVE
|
cf9750ed0f1d3c6032f787805685f3aa0c7ca48c
|
81285c396e0fa27f242767bdc7b4f230e624b80a
|
/tests/unit/test_replays.py
|
e5ce185a762b141a6abef71fe4291c404783dbb3
|
[
"Apache-2.0"
] |
permissive
|
betamaxpy/betamax
|
b1e3c7d3c1fc220e6c672b3c1a40ab0fa9f4c87a
|
8aa9f348d55ee0d584859603474efc74a0b76c49
|
refs/heads/main
| 2023-09-02T03:10:24.155432
| 2022-12-24T14:17:56
| 2022-12-24T14:17:56
| 11,439,734
| 285
| 40
|
NOASSERTION
| 2023-05-13T15:05:07
| 2013-07-16T03:23:22
|
Python
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
test_replays.py
|
from betamax import Betamax, BetamaxError
from requests import Session
import unittest
class TestReplays(unittest.TestCase):
def setUp(self):
self.session = Session()
def test_replays_response_on_right_order(self):
s = self.session
opts = {'record': 'none'}
with Betamax(s).use_cassette('test_replays_response_on_right_order', **opts) as betamax:
self.cassette_path = betamax.current_cassette.cassette_path
r0 = s.get('http://httpbin.org/get')
r1 = s.get('http://httpbin.org/get')
assert b'72.160.214.132' in r0.content
assert b'72.160.214.133' in r1.content
|
97b374ff93e6ac389abe049527be22d0b57ad4cd
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-recommendations-ai/google/cloud/recommendationengine_v1beta1/types/catalog_service.py
|
17d3815796f6bbfa2ae55db89490e0b437232cf0
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 5,473
|
py
|
catalog_service.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableMapping, MutableSequence
from google.protobuf import field_mask_pb2 # type: ignore
import proto # type: ignore
from google.cloud.recommendationengine_v1beta1.types import catalog
__protobuf__ = proto.module(
package="google.cloud.recommendationengine.v1beta1",
manifest={
"CreateCatalogItemRequest",
"GetCatalogItemRequest",
"ListCatalogItemsRequest",
"ListCatalogItemsResponse",
"UpdateCatalogItemRequest",
"DeleteCatalogItemRequest",
},
)
class CreateCatalogItemRequest(proto.Message):
r"""Request message for CreateCatalogItem method.
Attributes:
parent (str):
Required. The parent catalog resource name, such as
``projects/*/locations/global/catalogs/default_catalog``.
catalog_item (google.cloud.recommendationengine_v1beta1.types.CatalogItem):
Required. The catalog item to create.
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
catalog_item: catalog.CatalogItem = proto.Field(
proto.MESSAGE,
number=2,
message=catalog.CatalogItem,
)
class GetCatalogItemRequest(proto.Message):
r"""Request message for GetCatalogItem method.
Attributes:
name (str):
Required. Full resource name of catalog item, such as
``projects/*/locations/global/catalogs/default_catalog/catalogitems/some_catalog_item_id``.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
class ListCatalogItemsRequest(proto.Message):
r"""Request message for ListCatalogItems method.
Attributes:
parent (str):
Required. The parent catalog resource name, such as
``projects/*/locations/global/catalogs/default_catalog``.
page_size (int):
Optional. Maximum number of results to return
per page. If zero, the service will choose a
reasonable default.
page_token (str):
Optional. The previous
ListCatalogItemsResponse.next_page_token.
filter (str):
Optional. A filter to apply on the list
results.
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
page_size: int = proto.Field(
proto.INT32,
number=2,
)
page_token: str = proto.Field(
proto.STRING,
number=3,
)
filter: str = proto.Field(
proto.STRING,
number=4,
)
class ListCatalogItemsResponse(proto.Message):
r"""Response message for ListCatalogItems method.
Attributes:
catalog_items (MutableSequence[google.cloud.recommendationengine_v1beta1.types.CatalogItem]):
The catalog items.
next_page_token (str):
If empty, the list is complete. If nonempty, the token to
pass to the next request's
ListCatalogItemRequest.page_token.
"""
@property
def raw_page(self):
return self
catalog_items: MutableSequence[catalog.CatalogItem] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=catalog.CatalogItem,
)
next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
class UpdateCatalogItemRequest(proto.Message):
r"""Request message for UpdateCatalogItem method.
Attributes:
name (str):
Required. Full resource name of catalog item, such as
``projects/*/locations/global/catalogs/default_catalog/catalogItems/some_catalog_item_id``.
catalog_item (google.cloud.recommendationengine_v1beta1.types.CatalogItem):
Required. The catalog item to update/create. The
'catalog_item_id' field has to match that in the 'name'.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. Indicates which fields in the
provided 'item' to update. If not set, will by
default update all fields.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
catalog_item: catalog.CatalogItem = proto.Field(
proto.MESSAGE,
number=2,
message=catalog.CatalogItem,
)
update_mask: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
class DeleteCatalogItemRequest(proto.Message):
r"""Request message for DeleteCatalogItem method.
Attributes:
name (str):
Required. Full resource name of catalog item, such as
``projects/*/locations/global/catalogs/default_catalog/catalogItems/some_catalog_item_id``.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
e2d7ad28221d2b1d2f845a859b6ff9d784d258e2
|
d46844ac1c4230579d6c87d800e07fb41bc99592
|
/pwncat/modules/linux/enumerate/system/selinux.py
|
502b24ddf0d51348bd70135dbd9dd0e4f5a0717b
|
[
"MIT"
] |
permissive
|
calebstewart/pwncat
|
14ade3e424fb70ce3e62b8b5c5053959515799e7
|
37f04d4e16ff47c7fd70e95162f9fccd327cca7e
|
refs/heads/master
| 2023-08-14T04:27:04.773361
| 2022-03-21T20:35:00
| 2022-03-21T20:35:00
| 261,925,766
| 2,177
| 267
|
MIT
| 2023-05-19T04:33:17
| 2020-05-07T02:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
selinux.py
|
#!/usr/bin/env python3
from typing import Dict
import rich.markup
from pwncat.db import Fact
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
class SELinuxState(Fact):
def __init__(self, source, state, status):
super().__init__(source=source, types=["system.selinux"])
self.state: str = state
self.status: Dict[str, str] = status
def title(self, session):
result = "SELinux is "
if self.state == "enabled":
result += "[red]enabled[/red]"
elif self.state == "disabled":
result += "[green]disabled[/green]"
else:
result += f"[yellow]{rich.markup.escape(self.state)}[/yellow]"
return result
@property
def mode(self) -> str:
return self.status.get("Current mode", "unknown").lower()
@property
def enabled(self) -> bool:
return self.state.lower() == "enabled"
def description(self, session):
width = max(len(x) for x in self.status) + 1
return "\n".join(
f"{key+':':{width}} {value}" for key, value in self.status.items()
)
class Module(EnumerateModule):
"""
Retrieve the current SELinux state
"""
PROVIDES = ["system.selinux"]
SCHEDULE = Schedule.ONCE
PLATFORM = [Linux]
def enumerate(self, session):
try:
output = session.platform.run("sestatus", capture_output=True, text=True)
except (FileNotFoundError, PermissionError):
return
if output:
output = output.stdout.strip()
status = {}
for line in output.split("\n"):
line = line.strip().replace("\t", " ")
values = " ".join([x for x in line.split(" ") if x != ""]).split(":")
key = values[0].rstrip(":").strip()
value = " ".join(values[1:])
status[key] = value.strip()
if "SELinux status" in status:
state = status["SELinux status"]
else:
state = "unknown"
yield SELinuxState(self.name, state, status)
|
40c424866b437d8ea7aa8afb2644510f4f9f01b3
|
99b2aff89dcec2f43cee32a6bdd4c0c43d6c51fa
|
/tests/__init__.py
|
763dbf26c3905b0aebe48f83399f6a570d0ea8d2
|
[
"MIT"
] |
permissive
|
baking-bad/pytezos
|
c4248bde49a5b05521b8cc51eeca588b1a721660
|
19747e3acec2141f06e812025673f497fc07e2d4
|
refs/heads/master
| 2023-07-06T21:57:09.572985
| 2023-07-05T11:45:27
| 2023-07-05T11:45:27
| 169,243,460
| 115
| 43
|
MIT
| 2023-07-04T16:28:09
| 2019-02-05T13:12:50
|
Python
|
UTF-8
|
Python
| false
| false
| 57
|
py
|
__init__.py
|
import michelson_kernel # pylint: disable=unused-import
|
d4e075b5653955492d2d9f334839f39dec7a96aa
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/pandas/tests/util/test_assert_produces_warning.py
|
5c27a3ee79d4a82bce83eec56ab9d88e10dc06cd
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,412
|
py
|
test_assert_produces_warning.py
|
""""
Test module for testing ``pandas._testing.assert_produces_warning``.
"""
import warnings
import pytest
from pandas.errors import (
DtypeWarning,
PerformanceWarning,
)
import pandas._testing as tm
@pytest.fixture(
params=[
RuntimeWarning,
ResourceWarning,
UserWarning,
FutureWarning,
DeprecationWarning,
PerformanceWarning,
DtypeWarning,
],
)
def category(request):
"""
Return a unique warning category.
Useful for testing behavior of tm.assert_produces_warning with various categories.
"""
return request.param
@pytest.fixture(
params=[
(RuntimeWarning, UserWarning),
(UserWarning, FutureWarning),
(FutureWarning, RuntimeWarning),
(DeprecationWarning, PerformanceWarning),
(PerformanceWarning, FutureWarning),
(DtypeWarning, DeprecationWarning),
(ResourceWarning, DeprecationWarning),
(FutureWarning, DeprecationWarning),
],
ids=lambda x: type(x).__name__,
)
def pair_different_warnings(request):
"""
Return a pair of different warnings.
Useful for testing how several different warnings are handled
in tm.assert_produces_warning.
"""
return request.param
def f():
warnings.warn("f1", FutureWarning)
warnings.warn("f2", RuntimeWarning)
@pytest.mark.filterwarnings("ignore:f1:FutureWarning")
def test_assert_produces_warning_honors_filter():
# Raise by default.
msg = r"Caused unexpected warning\(s\)"
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(RuntimeWarning):
f()
with tm.assert_produces_warning(RuntimeWarning, raise_on_extra_warnings=False):
f()
@pytest.mark.parametrize(
"message, match",
[
("", None),
("", ""),
("Warning message", r".*"),
("Warning message", "War"),
("Warning message", r"[Ww]arning"),
("Warning message", "age"),
("Warning message", r"age$"),
("Message 12-234 with numbers", r"\d{2}-\d{3}"),
("Message 12-234 with numbers", r"^Mes.*\d{2}-\d{3}"),
("Message 12-234 with numbers", r"\d{2}-\d{3}\s\S+"),
("Message, which we do not match", None),
],
)
def test_catch_warning_category_and_match(category, message, match):
with tm.assert_produces_warning(category, match=match):
warnings.warn(message, category)
def test_fail_to_match_runtime_warning():
category = RuntimeWarning
match = "Did not see this warning"
unmatched = (
r"Did not see warning 'RuntimeWarning' matching 'Did not see this warning'. "
r"The emitted warning messages are "
r"\[RuntimeWarning\('This is not a match.'\), "
r"RuntimeWarning\('Another unmatched warning.'\)\]"
)
with pytest.raises(AssertionError, match=unmatched):
with tm.assert_produces_warning(category, match=match):
warnings.warn("This is not a match.", category)
warnings.warn("Another unmatched warning.", category)
def test_fail_to_match_future_warning():
category = FutureWarning
match = "Warning"
unmatched = (
r"Did not see warning 'FutureWarning' matching 'Warning'. "
r"The emitted warning messages are "
r"\[FutureWarning\('This is not a match.'\), "
r"FutureWarning\('Another unmatched warning.'\)\]"
)
with pytest.raises(AssertionError, match=unmatched):
with tm.assert_produces_warning(category, match=match):
warnings.warn("This is not a match.", category)
warnings.warn("Another unmatched warning.", category)
def test_fail_to_match_resource_warning():
category = ResourceWarning
match = r"\d+"
unmatched = (
r"Did not see warning 'ResourceWarning' matching '\\d\+'. "
r"The emitted warning messages are "
r"\[ResourceWarning\('This is not a match.'\), "
r"ResourceWarning\('Another unmatched warning.'\)\]"
)
with pytest.raises(AssertionError, match=unmatched):
with tm.assert_produces_warning(category, match=match):
warnings.warn("This is not a match.", category)
warnings.warn("Another unmatched warning.", category)
def test_fail_to_catch_actual_warning(pair_different_warnings):
expected_category, actual_category = pair_different_warnings
match = "Did not see expected warning of class"
with pytest.raises(AssertionError, match=match):
with tm.assert_produces_warning(expected_category):
warnings.warn("warning message", actual_category)
def test_ignore_extra_warning(pair_different_warnings):
expected_category, extra_category = pair_different_warnings
with tm.assert_produces_warning(expected_category, raise_on_extra_warnings=False):
warnings.warn("Expected warning", expected_category)
warnings.warn("Unexpected warning OK", extra_category)
def test_raise_on_extra_warning(pair_different_warnings):
expected_category, extra_category = pair_different_warnings
match = r"Caused unexpected warning\(s\)"
with pytest.raises(AssertionError, match=match):
with tm.assert_produces_warning(expected_category):
warnings.warn("Expected warning", expected_category)
warnings.warn("Unexpected warning NOT OK", extra_category)
def test_same_category_different_messages_first_match():
category = UserWarning
with tm.assert_produces_warning(category, match=r"^Match this"):
warnings.warn("Match this", category)
warnings.warn("Do not match that", category)
warnings.warn("Do not match that either", category)
def test_same_category_different_messages_last_match():
category = DeprecationWarning
with tm.assert_produces_warning(category, match=r"^Match this"):
warnings.warn("Do not match that", category)
warnings.warn("Do not match that either", category)
warnings.warn("Match this", category)
def test_match_multiple_warnings():
# https://github.com/pandas-dev/pandas/issues/47829
category = (FutureWarning, UserWarning)
with tm.assert_produces_warning(category, match=r"^Match this"):
warnings.warn("Match this", FutureWarning)
warnings.warn("Match this too", UserWarning)
def test_right_category_wrong_match_raises(pair_different_warnings):
target_category, other_category = pair_different_warnings
with pytest.raises(AssertionError, match="Did not see warning.*matching"):
with tm.assert_produces_warning(target_category, match=r"^Match this"):
warnings.warn("Do not match it", target_category)
warnings.warn("Match this", other_category)
@pytest.mark.parametrize("false_or_none", [False, None])
class TestFalseOrNoneExpectedWarning:
def test_raise_on_warning(self, false_or_none):
msg = r"Caused unexpected warning\(s\)"
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(false_or_none):
f()
def test_no_raise_without_warning(self, false_or_none):
with tm.assert_produces_warning(false_or_none):
pass
def test_no_raise_with_false_raise_on_extra(self, false_or_none):
with tm.assert_produces_warning(false_or_none, raise_on_extra_warnings=False):
f()
def test_raises_during_exception():
msg = "Did not see expected warning of class 'UserWarning'"
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(UserWarning):
raise ValueError
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(UserWarning):
warnings.warn("FutureWarning", FutureWarning)
raise IndexError
msg = "Caused unexpected warning"
with pytest.raises(AssertionError, match=msg):
with tm.assert_produces_warning(None):
warnings.warn("FutureWarning", FutureWarning)
raise SystemError
def test_passes_during_exception():
with pytest.raises(SyntaxError, match="Error"):
with tm.assert_produces_warning(None):
raise SyntaxError("Error")
with pytest.raises(ValueError, match="Error"):
with tm.assert_produces_warning(FutureWarning, match="FutureWarning"):
warnings.warn("FutureWarning", FutureWarning)
raise ValueError("Error")
|
97ecfb2294bd1db9b596c746e2eb11e22c100c6b
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/3d-object-classification/pointnet2/evaluate.py
|
a9d2b66325044579adb08afd71a2a25ffebcd3b8
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 4,889
|
py
|
evaluate.py
|
# Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple
import argparse
import os
import numpy as np
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from nnabla.utils.data_iterator import DataIterator
from nnabla.logger import logger
from pointnet2 import pointnet2_classification_msg, pointnet2_classification_ssg
from loss import classification_loss
from running_utils import categorical_accuracy
# Install neu (nnabla examples utils) to import these functions.
# See [NEU](https://github.com/nnabla/nnabla-examples/tree/master/utils).
from neu.datasets.modelnet40_normal_resampled import data_iterator_modelnet40_normal_resampled
from neu.checkpoint_util import load_checkpoint
def eval_one_epoch(
valid_data_iter: DataIterator,
valid_vars: Dict[str, nn.Variable],
valid_loss_vars: Dict[str, nn.Variable],
) -> Tuple[np.ndarray, np.ndarray]:
total_steps = 0
total_accuracy = 0.0
total_loss = 0.0
num_iterations = valid_data_iter.size // valid_data_iter.batch_size
for _ in range(num_iterations):
point_cloud, label = valid_data_iter.next()
valid_vars["point_cloud"].d = point_cloud
valid_vars["label"].d = label
valid_loss_vars["loss"].forward(clear_buffer=True)
pred_logits = valid_loss_vars["pred"].d.copy()
accuracy = categorical_accuracy(pred_logits, valid_vars["label"].d)
total_steps += 1
total_accuracy += accuracy
total_loss += float(valid_loss_vars["loss"].d)
average_accuracy = total_accuracy / float(total_steps)
average_loss = total_loss / float(total_steps)
return average_accuracy, average_loss
def evaluate(args):
# Set context
extension_module = args.context
ctx = get_extension_context(extension_module, device_id=args.device_id)
nn.set_default_context(ctx)
# Feature dim, with normal vector or not
feature_dim = 6 if args.with_normal else 3
# Create validation graph
valid_batch_size = 4  # a batch size of 4 divides the validation dataset evenly, so all samples are used
point_cloud_valid = nn.Variable(
(valid_batch_size, args.num_points, feature_dim))
label_valid = nn.Variable((valid_batch_size, 1))
if args.model_type == "ssg":
pred_valid = pointnet2_classification_ssg(
point_cloud_valid, train=False, num_classes=args.num_classes)
elif args.model_type == "msg":
pred_valid = pointnet2_classification_msg(
point_cloud_valid, train=False, num_classes=args.num_classes)
else:
raise ValueError
pred_valid.persistent = True
loss_valid = classification_loss(pred_valid, label_valid)
valid_vars = {"point_cloud": point_cloud_valid, "label": label_valid}
valid_loss_vars = {"loss": loss_valid, "pred": pred_valid}
# Load snapshot
load_checkpoint(args.checkpoint_json_path, {})
# Data Iterator
valid_data_iter = data_iterator_modelnet40_normal_resampled(
args.data_dir,
valid_batch_size,
False,
False,
args.num_points,
normalize=True,
with_normal=args.with_normal,
)
logger.info(f"Validation dataset size: {valid_data_iter.size}")
# Evaluation
logger.info("Evaluation starting ...")
accuracy, loss = eval_one_epoch(
valid_data_iter, valid_vars, valid_loss_vars)
logger.info("accuracy: {}".format(accuracy))
logger.info("loss: {}".format(loss))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", type=str, default=os.path.join(os.path.dirname(__file__), "data", "modelnet40_normal_resampled")
)
parser.add_argument("--model_type", type=str,
default="ssg", choices=["msg", "ssg"])
parser.add_argument("--num_classes", type=int, default=40)
parser.add_argument("--num_points", type=int, default=1024)
parser.add_argument("--with_normal", action="store_true")
parser.add_argument("--device_id", type=int, default=0)
parser.add_argument("--context", type=str, default="cudnn")
parser.add_argument(
"--checkpoint_json_path",
type=str,
default="./pointnet2_classification_result/seed_100/checkpoint_best/checkpoint_best.json",
)
args = parser.parse_args()
evaluate(args)
if __name__ == "__main__":
main()
|
f5b3795760a87ca73389397ce08583e990a62025
|
01857ef455ea60eccaf03b5a9059ec83e9803c2e
|
/nicegui/elements/chart.py
|
c5b9bc3bd29361c16c1c42baf4cfe7d54e694521
|
[
"MIT"
] |
permissive
|
zauberzeug/nicegui
|
f08312cc1f393deca79e0e84a2506d3a35efff16
|
c61b1315f29d51e26cc1168207f5616b302f8df0
|
refs/heads/main
| 2023-08-18T18:09:30.937322
| 2023-08-18T15:04:00
| 2023-08-18T15:04:00
| 365,250,183
| 5,128
| 271
|
MIT
| 2023-09-14T01:50:56
| 2021-05-07T13:55:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,578
|
py
|
chart.py
|
from typing import Callable, Dict, List, Optional
from ..element import Element
from ..events import (ChartPointClickEventArguments, ChartPointDragEventArguments, ChartPointDragStartEventArguments,
ChartPointDropEventArguments, GenericEventArguments, handle_event)
class Chart(Element,
component='chart.js',
libraries=['lib/highcharts/*.js'],
extra_libraries=['lib/highcharts/modules/*.js']):
def __init__(self, options: Dict, *,
type: str = 'chart', extras: List[str] = [],
on_point_click: Optional[Callable] = None,
on_point_drag_start: Optional[Callable] = None,
on_point_drag: Optional[Callable] = None,
on_point_drop: Optional[Callable] = None,
) -> None:
"""Chart
An element to create a chart using `Highcharts <https://www.highcharts.com/>`_.
Updates can be pushed to the chart by changing the `options` property.
After data has changed, call the `update` method to refresh the chart.
By default, a `Highcharts.chart` is created.
To use, e.g., `Highcharts.stockChart` instead, set the `type` property to "stockChart".
:param options: dictionary of Highcharts options
:param type: chart type (e.g. "chart", "stockChart", "mapChart", ...; default: "chart")
:param extras: list of extra dependencies to include (e.g. "annotations", "arc-diagram", "solid-gauge", ...)
:param on_point_click: callback function that is called when a point is clicked
:param on_point_drag_start: callback function that is called when a point drag starts
:param on_point_drag: callback function that is called when a point is dragged
:param on_point_drop: callback function that is called when a point is dropped
"""
super().__init__()
self._props['type'] = type
self._props['options'] = options
self._props['extras'] = extras
self.libraries.extend(library for library in self.extra_libraries if library.path.stem in extras)
if on_point_click:
def handle_point_click(e: GenericEventArguments) -> None:
handle_event(on_point_click, ChartPointClickEventArguments(
sender=self,
client=self.client,
event_type='point_click',
point_index=e.args['point_index'],
point_x=e.args['point_x'],
point_y=e.args['point_y'],
series_index=e.args['series_index'],
))
self.on('pointClick', handle_point_click, ['point_index', 'point_x', 'point_y', 'series_index'])
if on_point_drag_start:
def handle_point_drag_start(_: GenericEventArguments) -> None:
handle_event(on_point_drag_start, ChartPointDragStartEventArguments(
sender=self,
client=self.client,
event_type='point_drag_start',
))
self.on('pointDragStart', handle_point_drag_start, [])
if on_point_drag:
def handle_point_drag(e: GenericEventArguments) -> None:
handle_event(on_point_drag, ChartPointDragEventArguments(
sender=self,
client=self.client,
event_type='point_drag',
point_index=e.args['point_index'],
point_x=e.args['point_x'],
point_y=e.args['point_y'],
series_index=e.args['series_index'],
))
self.on('pointDrag', handle_point_drag, ['point_index', 'point_x', 'point_y', 'series_index'])
if on_point_drop:
def handle_point_drop(e: GenericEventArguments) -> None:
handle_event(on_point_drop, ChartPointDropEventArguments(
sender=self,
client=self.client,
event_type='point_drop',
point_index=e.args['point_index'],
point_x=e.args['point_x'],
point_y=e.args['point_y'],
series_index=e.args['series_index'],
))
self.on('pointDrop', handle_point_drop, ['point_index', 'point_x', 'point_y', 'series_index'])
@property
def options(self) -> Dict:
return self._props['options']
def update(self) -> None:
super().update()
self.run_method('update_chart')
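# --- Illustrative usage sketch (not part of the original file) ---
# The constructor docstring above notes that updates are pushed by mutating
# `options` and then calling `update()`. Assuming this element is exposed as
# `ui.chart` (an assumed entry point, not confirmed by this file), a page
# could use it like:
#
#     from nicegui import ui
#
#     chart = ui.chart({'title': {'text': 'Demo'}, 'series': [{'data': [1, 2, 3]}]})
#
#     def shuffle() -> None:
#         chart.options['series'][0]['data'] = [3, 1, 2]
#         chart.update()  # pushes the changed options to the client
#
#     ui.button('Shuffle', on_click=shuffle)
#     ui.run()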
|
160ba664cee103f37f2245b4aa2beb391d2b5437
|
082cb56436631f16585dc6c667a8b384cee3335f
|
/script/talk/source/t301500.py
|
b44c91859e8e2e843beca482ccece7ebe5778f05
|
[] |
no_license
|
vawser/Cinders-DS3
|
abf2c5e1c163f2e556a0d89e437eead3ddd6992c
|
d086ebce45b27806f757e04778dad1615e405dab
|
refs/heads/master
| 2023-09-01T00:48:00.500866
| 2023-08-07T12:25:24
| 2023-08-07T12:25:24
| 230,333,994
| 192
| 203
| null | 2022-02-13T21:09:26
| 2019-12-26T22:08:06
|
Python
|
UTF-8
|
Python
| false
| false
| 7,515
|
py
|
t301500.py
|
#-------------------------------------------
#-- Sorcery Scribe
#-------------------------------------------
# -*- coding: utf-8 -*-
#----------------------------------------------------
# Main Loop
#----------------------------------------------------
def t301500_1():
""" State 0,1 """
assert GetCurrentStateElapsedTime() > 1
""" State 2 """
while True:
call = t301500_x0() # Host Player
assert IsClientPlayer() == 1
""" State 3 """
call = t301500_x1() # Client Player
assert not IsClientPlayer()
# Host Player
def t301500_x0():
""" State 0,1 """
while True:
call = t301500_x3()
# Client Player
def t301500_x1():
""" State 0,1 """
assert t301500_x2() # Clear Talk State
""" State 2 """
return 0
# Clear Talk State
def t301500_x2():
""" State 0,1 """
if not CheckSpecificPersonTalkHasEnded(0):
""" State 7 """
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
""" State 6 """
ReportConversationEndToHavokBehavior()
else:
pass
""" State 2 """
if CheckSpecificPersonGenericDialogIsOpen(0) == 1:
""" State 3 """
ForceCloseGenericDialog()
else:
pass
""" State 4 """
if CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0):
""" State 5 """
ForceCloseMenu()
else:
pass
""" State 8 """
return 0
# Check Death
def t301500_x3():
""" State 0,1 """
call = t301500_x4() # NPC Loop
assert CheckSelfDeath() == 1
return 0
# NPC Loop
def t301500_x4():
""" State 0,5 """
while True:
call = t301500_x5() # Interaction State
if call.Done():
""" State 3 """
call = t301500_x8() # Menu Pre-loop
if call.Done():
pass
elif IsAttackedBySomeone() == 1:
""" State 1 """
Label('L0')
call = t301500_x6() # Attack Check
def ExitPause():
RemoveMyAggro()
if call.Done():
pass
elif IsPlayerDead() == 1:
break
elif IsPlayerDead() == 1:
break
elif GetDistanceToPlayer() > 2 or GetPlayerYDistance() > 0.25:
""" State 4 """
call = t301500_x7() # Distance Check
if call.Done() and (GetDistanceToPlayer() < 1.5 and GetPlayerYDistance() < 0.249):
pass
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsPlayerDead() == 1:
break
""" State 2 """
t301500_x2() # Clear Talk State
# Interaction State
def t301500_x5():
""" State 0,1 """
while True:
assert (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())
""" State 2 """
if (not (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())):
pass
elif CheckActionButtonArea(6120):
break
""" State 4 """
return 0
# Attack Check
def t301500_x6():
""" State 0,6 """
assert t301500_x2() # Clear Talk State
""" State 3 """
assert GetCurrentStateElapsedFrames() > 1
""" State 2 """
if GetDistanceToPlayer() > 3:
""" State 7 """
assert t301500_x2() # Clear Talk State
else:
""" State 5 """
pass
""" State 9 """
return 0
# Distance Check
def t301500_x7():
""" State 0,1 """
if (CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not
CheckSpecificPersonGenericDialogIsOpen(0)):
""" State 2,5 """
if GetDistanceToPlayer() > 3:
""" State 4 """
Label('L0')
assert t301500_x2() # Clear Talk State
else:
""" State 3 """
Goto('L0')
""" State 6 """
return 0
# Menu Pre-loop
def t301500_x8():
""" State 0,1 """
SetEventState(73109000, 0)
assert t301500_x9()
""" State 24 """
return 0
# Menu Loop
def t301500_x9():
c1110()
while True:
ClearTalkListData()
# Ascension
AddTalkListData(1, 15004001, -1)
# Talk
AddTalkListData(3, 10010200, -1)
# Leave
AddTalkListData(99, 15000005, -1)
assert (not CheckSpecificPersonGenericDialogIsOpen(2) and not (CheckSpecificPersonMenuIsOpen(-1, 2) == 1 and not CheckSpecificPersonGenericDialogIsOpen(2)))
ShowShopMessage(1)
# Ascension
if GetTalkListEntryResult() == 1:
assert t301500_x20()
continue
# Talk
elif GetTalkListEntryResult() == 3:
assert t301500_x10(text1=10021000, flag1=0, mode1=0)
continue
# Leave
elif GetTalkListEntryResult() == 99:
ReportConversationEndToHavokBehavior()
return 0
elif not (CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0)):
return 0
# Talk Function
def t301500_x10(text1=_, flag1=0, mode1=_):
""" State 0,4 """
assert t301500_x11() and CheckSpecificPersonTalkHasEnded(0) == 1
""" State 1 """
TalkToPlayer(text1, -1, -1, flag1)
assert CheckSpecificPersonTalkHasEnded(0) == 1
""" State 3 """
if not mode1:
pass
else:
""" State 2 """
ReportConversationEndToHavokBehavior()
""" State 5 """
return 0
# Talk Cleanup
def t301500_x11():
""" State 0,1 """
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
ForceCloseGenericDialog()
ForceCloseMenu()
ReportConversationEndToHavokBehavior()
""" State 2 """
return 0
#----------------------------------------------------
# Ascension
#----------------------------------------------------
def t301500_x20():
c1110()
while True:
ClearTalkListData()
# Magic Weapon
AddTalkListData(1, 15004001, -1)
# Leave
AddTalkListData(99, 15000005, -1)
assert (not CheckSpecificPersonGenericDialogIsOpen(2) and not (CheckSpecificPersonMenuIsOpen(-1, 2) == 1 and not CheckSpecificPersonGenericDialogIsOpen(2)))
ShowShopMessage(1)
# Purchase Sidearms
if GetTalkListEntryResult() == 1:
OpenRegularShop(270000, 279999)
continue
# Leave
elif GetTalkListEntryResult() == 99:
ReportConversationEndToHavokBehavior()
return 0
elif not (CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0)):
return 0
#----------------------------------------------------
# Utility
#----------------------------------------------------
# Acquire Gesture
def t301500_x50(z2=_, z3=_, flag1=_):
""" State 0,1 """
if GetEventStatus(flag1) == 1:
""" State 2 """
pass
else:
""" State 3,4 """
AcquireGesture(z2)
OpenItemAcquisitionMenu(3, z3, 1)
SetEventState(flag1, 1)
assert not IsMenuOpen(63) and GetCurrentStateElapsedFrames() > 1
""" State 5 """
return 0
|
cd499cb2ce7cc7241ba6a5c8415454b80c000fa9
|
e76a79816ff5203be2c4061e263a09d31072c940
|
/third-party/py/pex/pex/compatibility.py
|
1a86d5f8be90568b0efa755c47f57521dc534787
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
facebook/buck
|
ef3a833334499b1b44c586e9bc5e2eec8d930e09
|
9c7c421e49f4d92d67321f18c6d1cd90974c77c4
|
refs/heads/main
| 2023-08-25T19:30:28.803205
| 2023-04-19T11:32:59
| 2023-04-19T11:32:59
| 9,504,214
| 8,481
| 1,338
|
Apache-2.0
| 2023-05-04T22:13:59
| 2013-04-17T18:12:18
|
Java
|
UTF-8
|
Python
| false
| false
| 2,248
|
py
|
compatibility.py
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# This file contains several 2.x/3.x compatibility checkstyle violations for a reason
# checkstyle: noqa
from abc import ABCMeta
from numbers import Integral, Real
from sys import version_info as sys_version_info
# TODO(wickman) Since the io package is available in 2.6.x, use that instead of
# cStringIO/StringIO
try:
# CPython 2.x
from cStringIO import StringIO
except ImportError:
try:
# Python 2.x
from StringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
from io import BytesIO
AbstractClass = ABCMeta('AbstractClass', (object,), {})
PY2 = sys_version_info[0] == 2
PY3 = sys_version_info[0] == 3
StringIO = StringIO
BytesIO = BytesIO if PY3 else StringIO
integer = (Integral,)
real = (Real,)
numeric = integer + real
string = (str,) if PY3 else (str, unicode)
bytes = (bytes,)
if PY2:
def to_bytes(st, encoding='utf-8'):
if isinstance(st, unicode):
return st.encode(encoding)
elif isinstance(st, bytes):
return st
else:
raise ValueError('Cannot convert %s to bytes' % type(st))
else:
def to_bytes(st, encoding='utf-8'):
if isinstance(st, str):
return st.encode(encoding)
elif isinstance(st, bytes):
return st
else:
raise ValueError('Cannot convert %s to bytes.' % type(st))
_PY3_EXEC_FUNCTION = """
def exec_function(ast, globals_map):
locals_map = globals_map
exec ast in globals_map, locals_map
return locals_map
"""
if PY3:
def exec_function(ast, globals_map):
locals_map = globals_map
exec(ast, globals_map, locals_map)
return locals_map
else:
eval(compile(_PY3_EXEC_FUNCTION, "<exec_function>", "exec"))
if PY3:
from contextlib import contextmanager, ExitStack
@contextmanager
def nested(*context_managers):
enters = []
with ExitStack() as stack:
for manager in context_managers:
enters.append(stack.enter_context(manager))
yield tuple(enters)
else:
from contextlib import nested
__all__ = (
'AbstractClass',
'BytesIO',
'PY2',
'PY3',
'StringIO',
'bytes',
'exec_function',
'nested',
'string',
'to_bytes',
)
|
3d7301525696e593b42a96eabc4eda52264a247f
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py
|
e0ac42060591a0b66b6dc28154d7a1f016c694ba
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,176
|
py
|
sample_query_knowledgebase_async.py
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_query_knowledgebase_async.py
DESCRIPTION:
This sample demonstrates how to ask a question from a knowledge base.
USAGE:
python sample_query_knowledgebase_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledge base project.
"""
import asyncio
async def sample_query_knowledgebase():
# [START query_knowledgebase_async]
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering import models as qna
endpoint = os.environ["AZURE_QUESTIONANSWERING_ENDPOINT"]
key = os.environ["AZURE_QUESTIONANSWERING_KEY"]
knowledge_base_project = os.environ["AZURE_QUESTIONANSWERING_PROJECT"]
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
question = "How long should my Surface battery last?"
output = await client.get_answers(
question=question,
top=3,
confidence_threshold=0.2,
include_unstructured_sources=True,
short_answer_options=qna.ShortAnswerOptions(
confidence_threshold=0.2,
top=1
),
project_name=knowledge_base_project,
deployment_name="test"
)
if output.answers:
best_candidate = [a for a in output.answers if a.confidence and a.confidence > 0.7][0]
print("Q: {}".format(question))
print("A: {}".format(best_candidate.answer))
else:
print(f"No answers returned from question '{question}'")
# [END query_knowledgebase_async]
if __name__ == '__main__':
asyncio.run(sample_query_knowledgebase())
|
01100216dfb458ba7222d311127e0932afd2677c
|
14a42aa9e707f70312647fbf86adb96fce7a2f97
|
/testing/code/test_code.py
|
33809528a06b0b2ef19fa789d91df381257da0e0
|
[
"MIT"
] |
permissive
|
pytest-dev/pytest
|
a0374d435f2b46e8a475b4b26085ab4f3d04aa67
|
afb8d66e42a3449476cf9bf4526705b1e36ff5a5
|
refs/heads/main
| 2023-09-01T14:12:11.863580
| 2023-08-30T11:52:14
| 2023-08-30T11:52:14
| 37,489,525
| 11,423
| 3,125
|
MIT
| 2023-09-12T22:17:22
| 2015-06-15T20:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,501
|
py
|
test_code.py
|
import re
import sys
from types import FrameType
from unittest import mock
import pytest
from _pytest._code import Code
from _pytest._code import ExceptionInfo
from _pytest._code import Frame
from _pytest._code import Source
from _pytest._code.code import ExceptionChainRepr
from _pytest._code.code import ReprFuncArgs
def test_ne() -> None:
code1 = Code(compile('foo = "bar"', "", "exec"))
assert code1 == code1
code2 = Code(compile('foo = "baz"', "", "exec"))
assert code2 != code1
def test_code_gives_back_name_for_not_existing_file() -> None:
name = "abc-123"
co_code = compile("pass\n", name, "exec")
assert co_code.co_filename == name
code = Code(co_code)
assert str(code.path) == name
assert code.fullsource is None
def test_code_from_function_with_class() -> None:
class A:
pass
with pytest.raises(TypeError):
Code.from_function(A)
def x() -> None:
raise NotImplementedError()
def test_code_fullsource() -> None:
code = Code.from_function(x)
full = code.fullsource
assert "test_code_fullsource()" in str(full)
def test_code_source() -> None:
code = Code.from_function(x)
src = code.source()
expected = """def x() -> None:
raise NotImplementedError()"""
assert str(src) == expected
def test_frame_getsourcelineno_myself() -> None:
def func() -> FrameType:
return sys._getframe(0)
f = Frame(func())
source, lineno = f.code.fullsource, f.lineno
assert source is not None
assert source[lineno].startswith(" return sys._getframe(0)")
def test_getstatement_empty_fullsource() -> None:
def func() -> FrameType:
return sys._getframe(0)
f = Frame(func())
with mock.patch.object(f.code.__class__, "fullsource", None):
assert f.statement == Source("")
def test_code_from_func() -> None:
co = Code.from_function(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
def test_unicode_handling() -> None:
value = "ąć".encode()
def f() -> None:
raise Exception(value)
excinfo = pytest.raises(Exception, f)
str(excinfo)
def test_code_getargs() -> None:
def f1(x):
raise NotImplementedError()
c1 = Code.from_function(f1)
assert c1.getargs(var=True) == ("x",)
def f2(x, *y):
raise NotImplementedError()
c2 = Code.from_function(f2)
assert c2.getargs(var=True) == ("x", "y")
def f3(x, **z):
raise NotImplementedError()
c3 = Code.from_function(f3)
assert c3.getargs(var=True) == ("x", "z")
def f4(x, *y, **z):
raise NotImplementedError()
c4 = Code.from_function(f4)
assert c4.getargs(var=True) == ("x", "y", "z")
def test_frame_getargs() -> None:
def f1(x) -> FrameType:
return sys._getframe(0)
fr1 = Frame(f1("a"))
assert fr1.getargs(var=True) == [("x", "a")]
def f2(x, *y) -> FrameType:
return sys._getframe(0)
fr2 = Frame(f2("a", "b", "c"))
assert fr2.getargs(var=True) == [("x", "a"), ("y", ("b", "c"))]
def f3(x, **z) -> FrameType:
return sys._getframe(0)
fr3 = Frame(f3("a", b="c"))
assert fr3.getargs(var=True) == [("x", "a"), ("z", {"b": "c"})]
def f4(x, *y, **z) -> FrameType:
return sys._getframe(0)
fr4 = Frame(f4("a", "b", c="d"))
assert fr4.getargs(var=True) == [("x", "a"), ("y", ("b",)), ("z", {"c": "d"})]
class TestExceptionInfo:
def test_bad_getsource(self) -> None:
try:
if False:
pass
else:
assert False
except AssertionError:
exci = ExceptionInfo.from_current()
assert exci.getrepr()
def test_from_current_with_missing(self) -> None:
with pytest.raises(AssertionError, match="no current exception"):
ExceptionInfo.from_current()
class TestTracebackEntry:
def test_getsource(self) -> None:
try:
if False:
pass
else:
assert False
except AssertionError:
exci = ExceptionInfo.from_current()
entry = exci.traceback[0]
source = entry.getsource()
assert source is not None
assert len(source) == 6
assert "assert False" in source[5]
def test_tb_entry_str(self):
try:
assert False
except AssertionError:
exci = ExceptionInfo.from_current()
pattern = r" File '.*test_code.py':\d+ in test_tb_entry_str\n assert False"
entry = str(exci.traceback[0])
assert re.match(pattern, entry)
class TestReprFuncArgs:
def test_not_raise_exception_with_mixed_encoding(self, tw_mock) -> None:
args = [("unicode_string", "São Paulo"), ("utf8_string", b"S\xc3\xa3o Paulo")]
r = ReprFuncArgs(args)
r.toterminal(tw_mock)
assert (
tw_mock.lines[0]
== r"unicode_string = São Paulo, utf8_string = b'S\xc3\xa3o Paulo'"
)
def test_ExceptionChainRepr():
"""Test ExceptionChainRepr, especially with regard to being hashable."""
try:
raise ValueError()
except ValueError:
excinfo1 = ExceptionInfo.from_current()
excinfo2 = ExceptionInfo.from_current()
repr1 = excinfo1.getrepr()
repr2 = excinfo2.getrepr()
assert repr1 != repr2
assert isinstance(repr1, ExceptionChainRepr)
assert hash(repr1) != hash(repr2)
assert repr1 is not excinfo1.getrepr()
|
be39f20e2c06ee7740ec91e8534f4f0446753dbe
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/courses/machine_learning/deepdive2/end_to_end_ml/solutions/pipelines/containers/pipeline/main.py
|
47534ea80576fcf2ff99aff1c30144e917964fad
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
main.py
|
# Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
import os
def handle_newfile(data, context):
"""Background Cloud Function to be triggered by Cloud Storage.
This generic function calls the Cloud Run URL endpoint.
Args:
data (dict): The Cloud Functions event payload.
context (google.cloud.functions.Context): Metadata of triggering event.
Returns:
None; the output is written to Stackdriver Logging
"""
payload = {
'bucket' : data['bucket'],
'filename': data['name']
}
# Notes:
# (1) Ideally, we would simply invoke mlp_babyweight.finetune from here.
# However, kfp.Client() depends on binaries that are not available in Cloud
# Functions, hence this workaround of putting mlp_babyweight.py in a Docker
# container and invoking it via Cloud Run.
# (2) We could reduce the traffic to Cloud Run by checking the filename
# pattern here, but for reusability and maintainability reasons, I'm keeping
# this Cloud Function as a simple pass-through.
# receiving service url
url = os.environ.get('DESTINATION_URL', "No DESTINATION_URL")
print("Invoking Cloud Run at {} with {}".format(url, payload))
# See https://cloud.google.com/run/docs/authenticating/service-to-service
metadata_server_token_url = 'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience='
token_request_url = metadata_server_token_url + url
token_request_headers = {'Metadata-Flavor': 'Google'}
token_response = requests.get(token_request_url, headers=token_request_headers)
jwt = token_response.content.decode("utf-8")
# Provide the token in the request to the receiving service
headers = {
'Authorization': f'bearer {jwt}',
'Content-Type':'application/json'
}
print("Headers = {}".format(headers))
resp = requests.post(url, data=json.dumps(payload), headers=headers)
return (resp.status_code == requests.codes.ok)
|
5dc6292833f07ec832ac01193eca987b4484c166
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/pytorch/source/caffe2/python/test_util.py
|
947975f2fe59a4e49c415622ac5fe162cb389327
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
test_util.py
|
## @package test_util
# Module caffe2.python.test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
import unittest
import os
def rand_array(*dims):
# np.random.rand() returns a float instead of a 0-dim array, which is why we
# need this trick
return np.array(np.random.rand(*dims) - 0.5).astype(np.float32)
def randBlob(name, type, *dims, **kwargs):
offset = kwargs['offset'] if 'offset' in kwargs else 0.0
workspace.FeedBlob(name, np.random.rand(*dims).astype(type) + offset)
def randBlobFloat32(name, *dims, **kwargs):
randBlob(name, np.float32, *dims, **kwargs)
def randBlobsFloat32(names, *dims, **kwargs):
for name in names:
randBlobFloat32(name, *dims, **kwargs)
def numOps(net):
return len(net.Proto().op)
def str_compare(a, b, encoding="utf8"):
if isinstance(a, bytes):
a = a.decode(encoding)
if isinstance(b, bytes):
b = b.decode(encoding)
return a == b
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_cpu_allocator_do_zero_fill=0',
'--caffe2_cpu_allocator_do_junk_fill=1',
])
# clear the default engine settings to separate out their
# effect from the ops tests
core.SetEnginePref({}, {})
def setUp(self):
self.ws = workspace.C.Workspace()
workspace.ResetWorkspace()
def tearDown(self):
workspace.ResetWorkspace()
|
c405f32086495e20f1c641aea31b8b995311c8c2
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/doc/make.py
|
9db4ea406bc1f1e53477645a46477faacf53741e
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 11,982
|
py
|
make.py
|
#!/usr/bin/env python3
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
Usage
-----
$ python make.py clean
$ python make.py html
$ python make.py latex
"""
import argparse
import csv
import importlib
import os
import shutil
import subprocess
import sys
import webbrowser
import docutils
import docutils.parsers.rst
DOC_PATH = os.path.dirname(os.path.abspath(__file__))
SOURCE_PATH = os.path.join(DOC_PATH, "source")
BUILD_PATH = os.path.join(DOC_PATH, "build")
REDIRECTS_FILE = os.path.join(DOC_PATH, "redirects.csv")
class DocBuilder:
"""
Class to wrap the different commands of this script.
All public methods of this class can be called as parameters of the
script.
"""
def __init__(
self,
num_jobs="auto",
include_api=True,
whatsnew=False,
single_doc=None,
verbosity=0,
warnings_are_errors=False,
) -> None:
self.num_jobs = num_jobs
self.include_api = include_api
self.whatsnew = whatsnew
self.verbosity = verbosity
self.warnings_are_errors = warnings_are_errors
if single_doc:
single_doc = self._process_single_doc(single_doc)
os.environ["SPHINX_PATTERN"] = single_doc
elif not include_api:
os.environ["SPHINX_PATTERN"] = "-api"
elif whatsnew:
os.environ["SPHINX_PATTERN"] = "whatsnew"
self.single_doc_html = None
if single_doc and single_doc.endswith(".rst"):
self.single_doc_html = os.path.splitext(single_doc)[0] + ".html"
elif single_doc:
self.single_doc_html = f"reference/api/pandas.{single_doc}.html"
def _process_single_doc(self, single_doc):
"""
Make sure the provided value for --single is a path to an existing
.rst/.ipynb file, or a pandas object that can be imported.
For example, categorical.rst or pandas.DataFrame.head. For the latter,
return the corresponding file path
(e.g. reference/api/pandas.DataFrame.head.rst).
"""
base_name, extension = os.path.splitext(single_doc)
if extension in (".rst", ".ipynb"):
if os.path.exists(os.path.join(SOURCE_PATH, single_doc)):
return single_doc
else:
raise FileNotFoundError(f"File {single_doc} not found")
elif single_doc.startswith("pandas."):
try:
obj = pandas # noqa: F821
for name in single_doc.split("."):
obj = getattr(obj, name)
except AttributeError as err:
raise ImportError(f"Could not import {single_doc}") from err
else:
return single_doc[len("pandas.") :]
else:
raise ValueError(
f"--single={single_doc} not understood. "
"Value should be a valid path to a .rst or .ipynb file, "
"or a valid pandas object "
"(e.g. categorical.rst or pandas.DataFrame.head)"
)
@staticmethod
def _run_os(*args):
"""
Execute a command in an OS terminal.
Parameters
----------
*args : list of str
Command and parameters to be executed
Examples
--------
>>> DocBuilder()._run_os('python', '--version')
"""
subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
def _sphinx_build(self, kind: str):
"""
Call sphinx to build documentation.
Attribute `num_jobs` from the class is used.
Parameters
----------
kind : {'html', 'latex'}
Examples
--------
>>> DocBuilder(num_jobs=4)._sphinx_build('html')
"""
if kind not in ("html", "latex"):
raise ValueError(f"kind must be html or latex, not {kind}")
cmd = ["sphinx-build", "-b", kind]
if self.num_jobs:
cmd += ["-j", self.num_jobs]
if self.warnings_are_errors:
cmd += ["-W", "--keep-going"]
if self.verbosity:
cmd.append(f"-{'v' * self.verbosity}")
cmd += [
"-d",
os.path.join(BUILD_PATH, "doctrees"),
SOURCE_PATH,
os.path.join(BUILD_PATH, kind),
]
return subprocess.call(cmd)
def _open_browser(self, single_doc_html):
"""
Open a browser tab showing the built page `single_doc_html`.
"""
url = os.path.join("file://", DOC_PATH, "build", "html", single_doc_html)
webbrowser.open(url, new=2)
def _get_page_title(self, page):
"""
Open the rst file `page` and extract its title.
"""
fname = os.path.join(SOURCE_PATH, f"{page}.rst")
doc = docutils.utils.new_document(
"<doc>",
docutils.frontend.get_default_settings(docutils.parsers.rst.Parser),
)
with open(fname, encoding="utf-8") as f:
data = f.read()
parser = docutils.parsers.rst.Parser()
# do not generate any warning when parsing the rst
with open(os.devnull, "a", encoding="utf-8") as f:
doc.reporter.stream = f
parser.parse(data, doc)
section = next(
node for node in doc.children if isinstance(node, docutils.nodes.section)
)
title = next(
node for node in section.children if isinstance(node, docutils.nodes.title)
)
return title.astext()
def _add_redirects(self):
"""
Create in the build directory an html file with a redirect,
for every row in REDIRECTS_FILE.
"""
with open(REDIRECTS_FILE, encoding="utf-8") as mapping_fd:
reader = csv.reader(mapping_fd)
for row in reader:
if not row or row[0].strip().startswith("#"):
continue
html_path = os.path.join(BUILD_PATH, "html")
path = os.path.join(html_path, *row[0].split("/")) + ".html"
if not self.include_api and (
os.path.join(html_path, "reference") in path
or os.path.join(html_path, "generated") in path
):
continue
try:
title = self._get_page_title(row[1])
except Exception:
# the file can be an ipynb and not an rst, or docutils
# may not be able to read the rst because it has some
# sphinx specific stuff
title = "this page"
with open(path, "w", encoding="utf-8") as moved_page_fd:
html = f"""\
<html>
<head>
<meta http-equiv="refresh" content="0;URL={row[1]}.html"/>
</head>
<body>
<p>
The page has been moved to <a href="{row[1]}.html">{title}</a>
</p>
</body>
</html>"""
moved_page_fd.write(html)
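        # A redirects.csv row is assumed to be "<old/path>,<new/page>", e.g.
        #   generated/pandas.DataFrame.head,reference/api/pandas.DataFrame.head
        # which writes build/html/generated/pandas.DataFrame.head.html containing
        # a meta refresh to reference/api/pandas.DataFrame.head.html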
def html(self):
"""
Build HTML documentation.
"""
ret_code = self._sphinx_build("html")
zip_fname = os.path.join(BUILD_PATH, "html", "pandas.zip")
if os.path.exists(zip_fname):
os.remove(zip_fname)
if ret_code == 0:
if self.single_doc_html is not None:
self._open_browser(self.single_doc_html)
else:
self._add_redirects()
if self.whatsnew:
self._open_browser(os.path.join("whatsnew", "index.html"))
return ret_code
def latex(self, force=False):
"""
Build PDF documentation.
"""
if sys.platform == "win32":
sys.stderr.write("latex build has not been tested on windows\n")
else:
ret_code = self._sphinx_build("latex")
os.chdir(os.path.join(BUILD_PATH, "latex"))
if force:
for i in range(3):
self._run_os("pdflatex", "-interaction=nonstopmode", "pandas.tex")
raise SystemExit(
"You should check the file "
'"build/latex/pandas.pdf" for problems.'
)
self._run_os("make")
return ret_code
def latex_forced(self):
"""
Build PDF documentation with retries to find missing references.
"""
return self.latex(force=True)
@staticmethod
def clean():
"""
Clean documentation generated files.
"""
shutil.rmtree(BUILD_PATH, ignore_errors=True)
shutil.rmtree(os.path.join(SOURCE_PATH, "reference", "api"), ignore_errors=True)
def zip_html(self):
"""
Compress HTML documentation into a zip file.
"""
zip_fname = os.path.join(BUILD_PATH, "html", "pandas.zip")
if os.path.exists(zip_fname):
os.remove(zip_fname)
dirname = os.path.join(BUILD_PATH, "html")
fnames = os.listdir(dirname)
os.chdir(dirname)
self._run_os("zip", zip_fname, "-r", "-q", *fnames)
def main():
cmds = [method for method in dir(DocBuilder) if not method.startswith("_")]
joined = ",".join(cmds)
argparser = argparse.ArgumentParser(
description="pandas documentation builder", epilog=f"Commands: {joined}"
)
joined = ", ".join(cmds)
argparser.add_argument(
"command", nargs="?", default="html", help=f"command to run: {joined}"
)
argparser.add_argument(
"--num-jobs", default="auto", help="number of jobs used by sphinx-build"
)
argparser.add_argument(
"--no-api", default=False, help="omit api and autosummary", action="store_true"
)
argparser.add_argument(
"--whatsnew",
default=False,
help="only build whatsnew (and api for links)",
action="store_true",
)
argparser.add_argument(
"--single",
metavar="FILENAME",
type=str,
default=None,
help=(
"filename (relative to the 'source' folder) of section or method name to "
"compile, e.g. 'development/contributing.rst', "
"'pandas.DataFrame.join'"
),
)
argparser.add_argument(
"--python-path", type=str, default=os.path.dirname(DOC_PATH), help="path"
)
argparser.add_argument(
"-v",
action="count",
dest="verbosity",
default=0,
help=(
"increase verbosity (can be repeated), "
"passed to the sphinx build command"
),
)
argparser.add_argument(
"--warnings-are-errors",
"-W",
action="store_true",
help="fail if warnings are raised",
)
args = argparser.parse_args()
if args.command not in cmds:
joined = ", ".join(cmds)
raise ValueError(f"Unknown command {args.command}. Available options: {joined}")
# Below we update both os.environ and sys.path. The former is used by
# external libraries (namely Sphinx) to compile this module and resolve
# the import of `python_path` correctly. The latter is used to resolve
# the import within the module, injecting it into the global namespace
os.environ["PYTHONPATH"] = args.python_path
sys.path.insert(0, args.python_path)
globals()["pandas"] = importlib.import_module("pandas")
# Set the matplotlib backend to the non-interactive Agg backend for all
# child processes.
os.environ["MPLBACKEND"] = "module://matplotlib.backends.backend_agg"
builder = DocBuilder(
args.num_jobs,
not args.no_api,
args.whatsnew,
args.single,
args.verbosity,
args.warnings_are_errors,
)
return getattr(builder, args.command)()
if __name__ == "__main__":
sys.exit(main())
|
d31e2aba473b57adf1376973dff3347ceac450a5
|
cfb41f392fac304095a80d08497727c621550c00
|
/development/advect.py
|
5fcc3f4be115c324b3de431f9bbda0ca107f39e1
|
[
"BSD-3-Clause"
] |
permissive
|
clawpack/pyclaw
|
5b7121b63609c2cf9af30e012c9318e3b5244f18
|
6323b7295b80f33285b958b1a2144f88f51be4b1
|
refs/heads/master
| 2023-04-16T23:48:31.519427
| 2023-03-21T06:08:21
| 2023-03-21T06:08:21
| 1,628,711
| 124
| 97
|
BSD-3-Clause
| 2023-09-12T12:22:30
| 2011-04-18T03:11:21
|
Fortran
|
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
advect.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import sys
from six.moves import range
try:
import numpy as np
from petsc4py import PETSc
except ImportError:
sys.path.append("/opt/share/ksl/petsc4py/dev-aug29/ppc450d/lib/python/")
sys.path.append("/opt/share/ksl/numpy/dev-aug29/ppc450d/lib/python/")
import numpy as np
from petsc4py import PETSc
class PetCLAW:
def advection1D(self, M, cfl, T):
'''Script to solve 1D advection equation:
q_t + q_x = 0
        Using first-order upwind finite differences'''
da = PETSc.DA().create([M])
da.setUniformCoordinates() # solves the problem from 0 to 1
da.view()
xvec = da.getCoordinates()
xvec.view()
x = xvec.getArray()
h = x[1]-x[0]
k=cfl*h
fg = da.createGlobalVector()
fl = da.createLocalVector()
N=int(round(T/k))
# Initial condition:
q = np.exp(-10*(x-0.5)**2)
fg.setArray(q)
da.globalToLocal(fg,fl)
fg.view()
for n in range(N+1):
q = fl.getArray()
q[1:]=q[1:]-cfl*(q[1:]-q[:-1])
fl.setArray(q)
da.localToGlobal(fl,fg)
fg.view()
da.globalToLocal(fg,fl)
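    # A minimal PETSc-free sketch of the same update, for reference only; the
    # method name and defaults below are illustrative assumptions, not part of
    # the original solver.
    @staticmethod
    def upwind_reference(M=16, cfl=0.95, T=2.0):
        '''First-order upwind scheme for q_t + q_x = 0:
        q_i^{n+1} = q_i^n - cfl*(q_i^n - q_{i-1}^n), stable for 0 <= cfl <= 1.'''
        x = np.linspace(0.0, 1.0, M + 1)
        h = x[1] - x[0]
        q = np.exp(-10*(x - 0.5)**2)  # same Gaussian initial condition
        for _ in range(int(round(T/(cfl*h))) + 1):
            q[1:] = q[1:] - cfl*(q[1:] - q[:-1])  # upwind difference
        return q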
def run(self):
OptDB = PETSc.Options()
M = OptDB.getInt('M', 16)
cfl = OptDB.getReal('cfl',0.95)
T = OptDB.getReal('T',2.)
self.advection1D(M, cfl, T)
print('Done')
return
if __name__ == '__main__':
PetCLAW().run()
|
3b3526d372f8a5fef56fff90c862180c91f60e01
|
50203b4a349dcb2ed1e72c9f5463d84db8a6e983
|
/skyline/functions/metrics_manager/manage_mute_alerts_on.py
|
acc3bdbbc2ef008aab7817a1594786ec0db67aa2
|
[
"MIT"
] |
permissive
|
earthgecko/skyline
|
97e43df824d7c92d68086f529f0f3d051a7debb0
|
c2edc451e63d5eb57117ddcfbc6e79100e706460
|
refs/heads/master
| 2023-08-30T08:36:50.740285
| 2023-06-28T15:33:47
| 2023-06-28T15:33:47
| 20,475,900
| 482
| 74
|
NOASSERTION
| 2023-06-28T15:33:49
| 2014-06-04T08:33:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,538
|
py
|
manage_mute_alerts_on.py
|
"""
manage_mute_alerts_on.py
"""
import logging
from os import uname
from time import time
from settings import REMOTE_SKYLINE_INSTANCES
from slack_functions import slack_post_message
skyline_app = 'analyzer'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
# @added 20230605 - Feature #4932: mute_alerts_on
def manage_mute_alerts_on(self):
"""
Manage the metrics_manager.mute_alerts_on Redis hash
:param self: the self object
:type self: object
    :return: the remaining mute_alerts_on dict and the number of removed entries
    :rtype: tuple
"""
removed = 0
now = int(time())
logger.info('metrics_manager :: manage_mute_alerts_on - managing metrics_manager.mute_alerts_on')
mute_alerts_on_dict = {}
try:
mute_alerts_on_dict = self.redis_conn_decoded.hgetall('metrics_manager.mute_alerts_on')
except Exception as err:
logger.error('error :: metrics_manager :: manage_mute_alerts_on :: failed to hgetall metrics_manager.mute_alerts_on - %s' % (
err))
mute_alerts_on = list(mute_alerts_on_dict.keys())
logger.info('metrics_manager :: manage_mute_alerts_on :: metrics_manager.mute_alerts_on has %s items' % str(len(mute_alerts_on)))
metrics_to_remove = []
feedback_labelled_metric_ids_to_remove = []
for metric in mute_alerts_on:
timestamp = int(mute_alerts_on_dict[metric])
if now >= timestamp:
metrics_to_remove.append(metric)
del mute_alerts_on_dict[metric]
if metric.startswith('labelled_metrics.'):
metric_id_str = metric.split('.')[-1]
feedback_labelled_metric_ids_to_remove.append(metric_id_str)
if metrics_to_remove:
logger.info('metrics_manager :: manage_mute_alerts_on - removing %s metrics from metrics_manager.mute_alerts_on' % str(len(metrics_to_remove)))
try:
removed = self.redis_conn_decoded.hdel('metrics_manager.mute_alerts_on', *set(metrics_to_remove))
logger.info('metrics_manager :: manage_mute_alerts_on - removed %s metrics from metrics_manager.mute_alerts_on' % str(removed))
except Exception as err:
logger.error('error :: metrics_manager :: manage_mute_alerts_on :: failed to hdel metrics_manager.mute_alerts_on with metrics_to_remove - %s' % (
err))
else:
logger.info('metrics_manager :: manage_mute_alerts_on - no metrics to remove from metrics_manager.mute_alerts_on')
if feedback_labelled_metric_ids_to_remove:
try:
self.redis_conn.srem('metrics_manager.feedback.labelled_metric_ids', *set(feedback_labelled_metric_ids_to_remove))
except Exception as err:
logger.error('error :: metrics_manager :: failed to srem from metrics_manager.feedback.labelled_metric_ids Redis set - %s' % err)
if removed:
try:
alert_slack_channel = None
slack_message = '*Skyline - NOTICE* - alerting muting has expired and been *removed* from %s metrics' % (
str(removed))
slack_message = '%s - on %s' % (slack_message, str(uname()[1]))
slack_post = slack_post_message(skyline_app, alert_slack_channel, None, slack_message)
logger.info('metrics_manager :: manage_mute_alerts_on :: posted notice to slack - %s' % slack_message)
except Exception as err:
logger.error('error :: metrics_manager :: manage_mute_alerts_on :: slack_post_message failed - %s' % (
err))
return mute_alerts_on_dict, removed
|
b6d143990c367720c629883217c5f3f951e6e0a0
|
e8b04bef9aa1ac8e2c109dd315f133c8f4d28ae6
|
/projects/samples/robotbenchmark/robot_programming/controllers/supervisor/supervisor.py
|
0698858a3681d08242b80f77efe4fd91916e5e50
|
[
"Apache-2.0"
] |
permissive
|
cyberbotics/webots
|
f075dacf4067e8dcebbfd89e8690df8525f6d745
|
8aba6eaae76989facf3442305c8089d3cc366bcf
|
refs/heads/master
| 2023-08-31T09:41:13.205940
| 2023-08-18T10:48:30
| 2023-08-18T10:48:30
| 156,228,018
| 2,495
| 1,525
|
Apache-2.0
| 2023-08-28T16:30:33
| 2018-11-05T14:09:10
|
C++
|
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
supervisor.py
|
"""Supervisor of the Robot Programming benchmark."""
from controller import Supervisor
import os
import sys
try:
includePath = os.path.join(os.path.normpath(os.environ.get("WEBOTS_HOME")), 'projects', 'samples', 'robotbenchmark',
'include')
sys.path.append(includePath)
from robotbenchmark import robotbenchmarkRecord
except ImportError:
sys.stderr.write("Warning: 'robotbenchmark' module not found.\n")
sys.exit(0)
robot = Supervisor()
timestep = int(robot.getBasicTimeStep())
thymio = robot.getFromDef("THYMIO2")
translation = thymio.getField("translation")
tx = 0
running = True
while robot.step(timestep) != -1:
t = translation.getSFVec3f()
if running:
percent = 1 - abs(0.25 + t[0]) / 0.25
if percent < 0:
percent = 0
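        # e.g. t[0] == -0.25 (the goal position implied by the formula) gives
        # percent == 1, while t[0] == 0 (start) or t[0] == -0.5 gives percent == 0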
if t[0] < -0.01 and abs(t[0] - tx) < 0.0001: # away from starting position and not moving any more
message = "stop"
running = False
else:
message = "percent"
message += ":" + str(percent)
robot.wwiSendText(message)
tx = t[0]
else: # wait for record message
message = robot.wwiReceiveText()
while message:
if message.startswith("record:"):
record = robotbenchmarkRecord(message, "robot_programming", percent)
robot.wwiSendText(record)
break
elif message == "exit":
break
message = robot.wwiReceiveText()
robot.simulationSetMode(Supervisor.SIMULATION_MODE_PAUSE)
|
81b3086028408995c4f19c993aa4145f2a4c9999
|
9798f5eca599c840bc9f307e9726f43a4c013aa1
|
/rest_framework_json_api/schemas/openapi.py
|
52f08da6e4d002c9dc9309cbbb9cf87e0e5bf8ae
|
[
"BSD-2-Clause"
] |
permissive
|
django-json-api/django-rest-framework-json-api
|
6e4591d5c85e448a2a9f4f9fe4f1fe7e79c0b72f
|
cd5f17970123400a2c5b98cfbb11f940d2cf09a9
|
refs/heads/main
| 2023-09-01T10:39:45.323109
| 2023-08-25T11:39:19
| 2023-08-25T11:39:19
| 21,987,374
| 1,133
| 378
|
BSD-2-Clause
| 2023-09-07T11:26:39
| 2014-07-18T16:46:14
|
Python
|
UTF-8
|
Python
| false
| false
| 36,003
|
py
|
openapi.py
|
import warnings
from urllib.parse import urljoin
from rest_framework.fields import empty
from rest_framework.relations import ManyRelatedField
from rest_framework.schemas import openapi as drf_openapi
from rest_framework.schemas.utils import is_list_view
from rest_framework_json_api import serializers, views
from rest_framework_json_api.compat import get_reference
from rest_framework_json_api.relations import ManySerializerMethodResourceRelatedField
from rest_framework_json_api.utils import format_field_name
class SchemaGenerator(drf_openapi.SchemaGenerator):
"""
Extend DRF's SchemaGenerator to implement JSON:API flavored generateschema command.
"""
#: These JSON:API component definitions are referenced by the generated OAS schema.
#: If you need to add more or change these static component definitions, extend this dict.
jsonapi_components = {
"schemas": {
"jsonapi": {
"type": "object",
"description": "The server's implementation",
"properties": {
"version": {"type": "string"},
"meta": {"$ref": "#/components/schemas/meta"},
},
"additionalProperties": False,
},
"resource": {
"type": "object",
"required": ["type", "id"],
"additionalProperties": False,
"properties": {
"type": {"$ref": "#/components/schemas/type"},
"id": {"$ref": "#/components/schemas/id"},
"attributes": {
"type": "object",
# ...
},
"relationships": {
"type": "object",
# ...
},
"links": {"$ref": "#/components/schemas/links"},
"meta": {"$ref": "#/components/schemas/meta"},
},
},
"include": {
"type": "object",
"required": ["type", "id"],
"additionalProperties": False,
"properties": {
"type": {"$ref": "#/components/schemas/type"},
"id": {"$ref": "#/components/schemas/id"},
"attributes": {
"type": "object",
"additionalProperties": True,
# ...
},
"relationships": {
"type": "object",
"additionalProperties": True,
# ...
},
"links": {"$ref": "#/components/schemas/links"},
"meta": {"$ref": "#/components/schemas/meta"},
},
},
"link": {
"oneOf": [
{
"description": "a string containing the link's URL",
"type": "string",
"format": "uri-reference",
},
{
"type": "object",
"required": ["href"],
"properties": {
"href": {
"description": "a string containing the link's URL",
"type": "string",
"format": "uri-reference",
},
"meta": {"$ref": "#/components/schemas/meta"},
},
},
]
},
"links": {
"type": "object",
"additionalProperties": {"$ref": "#/components/schemas/link"},
},
"reltoone": {
"description": "a singular 'to-one' relationship",
"type": "object",
"properties": {
"links": {"$ref": "#/components/schemas/relationshipLinks"},
"data": {"$ref": "#/components/schemas/relationshipToOne"},
"meta": {"$ref": "#/components/schemas/meta"},
},
},
"relationshipToOne": {
"description": "reference to other resource in a to-one relationship",
"anyOf": [
{"$ref": "#/components/schemas/nulltype"},
{"$ref": "#/components/schemas/linkage"},
],
},
"reltomany": {
"description": "a multiple 'to-many' relationship",
"type": "object",
"properties": {
"links": {"$ref": "#/components/schemas/relationshipLinks"},
"data": {"$ref": "#/components/schemas/relationshipToMany"},
"meta": {"$ref": "#/components/schemas/meta"},
},
},
"relationshipLinks": {
"description": "optional references to other resource objects",
"type": "object",
"additionalProperties": True,
"properties": {
"self": {"$ref": "#/components/schemas/link"},
"related": {"$ref": "#/components/schemas/link"},
},
},
"relationshipToMany": {
"description": "An array of objects each containing the "
"'type' and 'id' for to-many relationships",
"type": "array",
"items": {"$ref": "#/components/schemas/linkage"},
"uniqueItems": True,
},
# A RelationshipView uses a ResourceIdentifierObjectSerializer (hence the name
# ResourceIdentifierObject returned by get_component_name()) which serializes type
# and id. These can be lists or individual items depending on whether the
# relationship is toMany or toOne so offer both options since we are not iterating
# over all the possible {related_field}'s but rather rendering one path schema
# which may represent toMany and toOne relationships.
"ResourceIdentifierObject": {
"oneOf": [
{"$ref": "#/components/schemas/relationshipToOne"},
{"$ref": "#/components/schemas/relationshipToMany"},
]
},
"linkage": {
"type": "object",
"description": "the 'type' and 'id'",
"required": ["type", "id"],
"properties": {
"type": {"$ref": "#/components/schemas/type"},
"id": {"$ref": "#/components/schemas/id"},
"meta": {"$ref": "#/components/schemas/meta"},
},
},
"pagination": {
"type": "object",
"properties": {
"first": {"$ref": "#/components/schemas/pageref"},
"last": {"$ref": "#/components/schemas/pageref"},
"prev": {"$ref": "#/components/schemas/pageref"},
"next": {"$ref": "#/components/schemas/pageref"},
},
},
"pageref": {
"oneOf": [
{"type": "string", "format": "uri-reference"},
{"$ref": "#/components/schemas/nulltype"},
]
},
"failure": {
"type": "object",
"required": ["errors"],
"properties": {
"errors": {"$ref": "#/components/schemas/errors"},
"meta": {"$ref": "#/components/schemas/meta"},
"jsonapi": {"$ref": "#/components/schemas/jsonapi"},
"links": {"$ref": "#/components/schemas/links"},
},
},
"errors": {
"type": "array",
"items": {"$ref": "#/components/schemas/error"},
"uniqueItems": True,
},
"error": {
"type": "object",
"additionalProperties": False,
"properties": {
"id": {"type": "string"},
"status": {"type": "string"},
"links": {"$ref": "#/components/schemas/links"},
"code": {"type": "string"},
"title": {"type": "string"},
"detail": {"type": "string"},
"source": {
"type": "object",
"properties": {
"pointer": {
"type": "string",
"description": (
"A [JSON Pointer](https://tools.ietf.org/html/rfc6901) "
"to the associated entity in the request document "
"[e.g. `/data` for a primary data object, or "
"`/data/attributes/title` for a specific attribute."
),
},
"parameter": {
"type": "string",
"description": "A string indicating which query parameter "
"caused the error.",
},
"meta": {"$ref": "#/components/schemas/meta"},
},
},
},
},
"onlymeta": {
"additionalProperties": False,
"properties": {"meta": {"$ref": "#/components/schemas/meta"}},
},
"meta": {"type": "object", "additionalProperties": True},
"datum": {
"description": "singular item",
"properties": {"data": {"$ref": "#/components/schemas/resource"}},
},
"nulltype": {"type": "object", "nullable": True, "default": None},
"type": {
"type": "string",
"description": "The [type]"
"(https://jsonapi.org/format/#document-resource-object-identification) "
"member is used to describe resource objects that share common attributes "
"and relationships.",
},
"id": {
"type": "string",
"description": "Each resource object’s type and id pair MUST "
"[identify]"
"(https://jsonapi.org/format/#document-resource-object-identification) "
"a single, unique resource.",
},
},
"parameters": {
"include": {
"name": "include",
"in": "query",
"description": "[list of included related resources]"
"(https://jsonapi.org/format/#fetching-includes)",
"required": False,
"style": "form",
"schema": {"type": "string"},
},
# TODO: deepObject not well defined/supported:
# https://github.com/OAI/OpenAPI-Specification/issues/1706
"fields": {
"name": "fields",
"in": "query",
"description": "[sparse fieldsets]"
"(https://jsonapi.org/format/#fetching-sparse-fieldsets).\n"
"Use fields[\\<typename\\>]=field1,field2,...,fieldN",
"required": False,
"style": "deepObject",
"schema": {
"type": "object",
},
"explode": True,
},
},
}
def get_schema(self, request=None, public=False):
"""
Generate a JSON:API OpenAPI schema.
Overrides upstream DRF's get_schema.
"""
# TODO: avoid copying so much of upstream get_schema()
schema = super().get_schema(request, public)
components_schemas = {}
# Iterate endpoints generating per method path operations.
paths = {}
_, view_endpoints = self._get_paths_and_endpoints(None if public else request)
#: `expanded_endpoints` is like view_endpoints with one extra field tacked on:
#: - 'action' copy of current view.action (list/fetch) as this gets reset for
# each request.
expanded_endpoints = []
for path, method, view in view_endpoints:
if hasattr(view, "action") and view.action == "retrieve_related":
expanded_endpoints += self._expand_related(
path, method, view, view_endpoints
)
else:
expanded_endpoints.append(
(path, method, view, getattr(view, "action", None))
)
for path, method, view, action in expanded_endpoints:
if not self.has_view_permissions(path, method, view):
continue
# kludge to preserve view.action as it is 'list' for the parent ViewSet
# but the related viewset that was expanded may be either 'fetch' (to_one) or 'list'
# (to_many). This patches the view.action appropriately so that
# view.schema.get_operation() "does the right thing" for fetch vs. list.
current_action = None
if hasattr(view, "action"):
current_action = view.action
view.action = action
operation = view.schema.get_operation(path, method)
components = view.schema.get_components(path, method)
for k in components.keys():
if k not in components_schemas:
continue
if components_schemas[k] == components[k]:
continue
warnings.warn(
                    f'Schema component "{k}" has been overridden with a different value.',
stacklevel=1,
)
components_schemas.update(components)
if hasattr(view, "action"):
view.action = current_action
# Normalise path for any provided mount url.
if path.startswith("/"):
path = path[1:]
path = urljoin(self.url or "/", path)
paths.setdefault(path, {})
paths[path][method.lower()] = operation
self.check_duplicate_operation_id(paths)
# Compile final schema, overriding stuff from super class.
schema["paths"] = paths
schema["components"] = self.jsonapi_components
schema["components"]["schemas"].update(components_schemas)
return schema
def _expand_related(self, path, method, view, view_endpoints):
"""
Expand path containing .../{id}/{related_field} into list of related fields
        and **their** views, making sure toOne relationships' views are a 'fetch' and
        toMany relationships' are a 'list'.
:param path
:param method
:param view
:param view_endpoints
:return:list[tuple(path, method, view, action)]
"""
result = []
serializer = view.get_serializer()
# It's not obvious if it's allowed to have both included_ and related_ serializers,
# so just merge both dicts.
serializers = {}
if hasattr(serializer, "included_serializers"):
serializers = {**serializers, **serializer.included_serializers}
if hasattr(serializer, "related_serializers"):
serializers = {**serializers, **serializer.related_serializers}
related_fields = [fs for fs in serializers.items()]
for field, related_serializer in related_fields:
related_view = self._find_related_view(
view_endpoints, related_serializer, view
)
if related_view:
action = self._field_is_one_or_many(field, view)
result.append(
(
path.replace("{related_field}", field),
method,
related_view,
action,
)
)
return result
def _find_related_view(self, view_endpoints, related_serializer, parent_view):
"""
        For a given related_serializer, try to find its "parent" view instance.
:param view_endpoints: list of all view endpoints
:param related_serializer: the related serializer for a given related field
:param parent_view: the parent view (used to find toMany vs. toOne).
TODO: not actually used.
:return:view
"""
for _path, _method, view in view_endpoints:
view_serializer = view.get_serializer()
if isinstance(view_serializer, related_serializer):
return view
return None
def _field_is_one_or_many(self, field, view):
serializer = view.get_serializer()
if isinstance(serializer.fields[field], ManyRelatedField):
return "list"
else:
return "fetch"
class AutoSchema(drf_openapi.AutoSchema):
"""
Extend DRF's openapi.AutoSchema for JSON:API serialization.
"""
#: ignore all the media types and only generate a JSON:API schema.
content_types = ["application/vnd.api+json"]
def get_operation(self, path, method):
"""
JSON:API adds some standard fields to the API response that are not in upstream DRF:
- some that only apply to GET/HEAD methods.
- collections
- special handling for POST, PATCH, DELETE
"""
operation = {}
operation["operationId"] = self.get_operation_id(path, method)
operation["description"] = self.get_description(path, method)
serializer = self.get_response_serializer(path, method)
parameters = []
parameters += self.get_path_parameters(path, method)
# pagination, filters only apply to GET/HEAD of collections and items
if method in ["GET", "HEAD"]:
parameters += self._get_include_parameters(path, method, serializer)
parameters += self._get_fields_parameters(path, method)
parameters += self.get_pagination_parameters(path, method)
parameters += self.get_filter_parameters(path, method)
operation["parameters"] = parameters
operation["tags"] = self.get_tags(path, method)
# get request and response code schemas
if method == "GET":
if is_list_view(path, method, self.view):
self._add_get_collection_response(operation, path)
else:
self._add_get_item_response(operation, path)
elif method == "POST":
self._add_post_item_response(operation, path)
elif method == "PATCH":
self._add_patch_item_response(operation, path)
elif method == "DELETE":
# should only allow deleting a resource, not a collection
# TODO: implement delete of a relationship in future release.
self._add_delete_item_response(operation, path)
return operation
def get_operation_id(self, path, method):
"""
        The upstream DRF version creates non-unique operationIds, because the same
        view is used for the main path as well as for related and relationship paths.
        This concatenates the (mapped) method name and path, as the spec allows almost
        any string, so uniqueness comes from including the path.
"""
method_name = getattr(self.view, "action", method.lower())
if is_list_view(path, method, self.view):
action = "List"
elif method_name not in self.method_mapping:
action = method_name
else:
action = self.method_mapping[method.lower()]
return action + path
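        # For example (assuming an /authors/ endpoint): GET /authors/ yields
        # "List/authors/" and GET /authors/{id}/ yields "retrieve/authors/{id}/",
        # unique because the path is part of the id.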
def _get_include_parameters(self, path, method, serializer):
"""
includes parameter: https://jsonapi.org/format/#fetching-includes
"""
if getattr(serializer, "included_serializers", {}):
return [{"$ref": "#/components/parameters/include"}]
return []
def _get_fields_parameters(self, path, method):
"""
sparse fieldsets https://jsonapi.org/format/#fetching-sparse-fieldsets
"""
# TODO: See if able to identify the specific types for fields[type]=... and return this:
# name: fields
# in: query
# description: '[sparse fieldsets](https://jsonapi.org/format/#fetching-sparse-fieldsets)' # noqa: B950
# required: true
# style: deepObject
# schema:
# type: object
# properties:
# hello:
# type: string # noqa F821
# world:
# type: string # noqa F821
# explode: true
return [{"$ref": "#/components/parameters/fields"}]
def _add_get_collection_response(self, operation, path):
"""
Add GET 200 response for a collection to operation
"""
operation["responses"] = {
"200": self._get_toplevel_200_response(
operation, path, "GET", collection=True
)
}
self._add_get_4xx_responses(operation)
def _add_get_item_response(self, operation, path):
"""
add GET 200 response for an item to operation
"""
operation["responses"] = {
"200": self._get_toplevel_200_response(
operation, path, "GET", collection=False
)
}
self._add_get_4xx_responses(operation)
def _get_toplevel_200_response(self, operation, path, method, collection=True):
"""
return top-level JSON:API GET 200 response
:param collection: True for collections; False for individual items.
Uses a $ref to the components.schemas.<Name> component definition.
"""
if collection:
data = {
"type": "array",
"items": get_reference(
self, self.get_response_serializer(path, method)
),
}
else:
data = get_reference(self, self.get_response_serializer(path, method))
return {
"description": operation["operationId"],
"content": {
"application/vnd.api+json": {
"schema": {
"type": "object",
"required": ["data"],
"properties": {
"data": data,
"included": {
"type": "array",
"uniqueItems": True,
"items": {"$ref": "#/components/schemas/include"},
},
"links": {
"description": "Link members related to primary data",
"allOf": [
{"$ref": "#/components/schemas/links"},
{"$ref": "#/components/schemas/pagination"},
],
},
"jsonapi": {"$ref": "#/components/schemas/jsonapi"},
},
}
}
},
}
def _add_post_item_response(self, operation, path):
"""
add response for POST of an item to operation
"""
operation["requestBody"] = self.get_request_body(path, "POST")
operation["responses"] = {
"201": self._get_toplevel_200_response(
operation, path, "POST", collection=False
)
}
operation["responses"]["201"]["description"] = (
"[Created](https://jsonapi.org/format/#crud-creating-responses-201). "
"Assigned `id` and/or any other changes are in this response."
)
self._add_async_response(operation)
operation["responses"]["204"] = {
"description": "[Created](https://jsonapi.org/format/#crud-creating-responses-204) "
"with the supplied `id`. No other changes from what was POSTed."
}
self._add_post_4xx_responses(operation)
def _add_patch_item_response(self, operation, path):
"""
Add PATCH response for an item to operation
"""
operation["requestBody"] = self.get_request_body(path, "PATCH")
operation["responses"] = {
"200": self._get_toplevel_200_response(
operation, path, "PATCH", collection=False
)
}
self._add_patch_4xx_responses(operation)
def _add_delete_item_response(self, operation, path):
"""
add DELETE response for item or relationship(s) to operation
"""
# Only DELETE of relationships has a requestBody
if isinstance(self.view, views.RelationshipView):
operation["requestBody"] = self.get_request_body(path, "DELETE")
self._add_delete_responses(operation)
def get_request_body(self, path, method):
"""
A request body is required by JSON:API for POST, PATCH, and DELETE methods.
"""
serializer = self.get_request_serializer(path, method)
if not isinstance(serializer, (serializers.BaseSerializer,)):
return {}
is_relationship = isinstance(self.view, views.RelationshipView)
# DRF uses a $ref to the component schema definition, but this
# doesn't work for JSON:API due to the different required fields based on
# the method, so make those changes and inline another copy of the schema.
# TODO: A future improvement could make this DRYer with multiple component schemas:
# A base schema for each viewset that has no required fields
# One subclassed from the base that requires some fields (`type` but not `id` for POST)
# Another subclassed from base with required type/id but no required attributes (PATCH)
if is_relationship:
item_schema = {"$ref": "#/components/schemas/ResourceIdentifierObject"}
else:
item_schema = self.map_serializer(serializer)
if method == "POST":
# 'type' and 'id' are both required for:
# - all relationship operations
# - regular PATCH or DELETE
# Only 'type' is required for POST: system may assign the 'id'.
item_schema["required"] = ["type"]
if "properties" in item_schema and "attributes" in item_schema["properties"]:
# No required attributes for PATCH
if (
method in ["PATCH", "PUT"]
and "required" in item_schema["properties"]["attributes"]
):
del item_schema["properties"]["attributes"]["required"]
# No read_only fields for request.
for name, schema in (
item_schema["properties"]["attributes"]["properties"].copy().items()
): # noqa E501
if "readOnly" in schema:
del item_schema["properties"]["attributes"]["properties"][name]
if "properties" in item_schema and "relationships" in item_schema["properties"]:
# No required relationships for PATCH
if (
method in ["PATCH", "PUT"]
and "required" in item_schema["properties"]["relationships"]
):
del item_schema["properties"]["relationships"]["required"]
return {
"content": {
ct: {
"schema": {
"required": ["data"],
"properties": {"data": item_schema},
}
}
for ct in self.content_types
}
}
def map_serializer(self, serializer):
"""
Custom map_serializer that serializes the schema using the JSON:API spec.
Non-attributes like related and identity fields, are moved to 'relationships'
and 'links'.
"""
# TODO: remove attributes, etc. for relationshipView??
required = []
attributes = {}
relationships_required = []
relationships = {}
for field in serializer.fields.values():
if isinstance(field, serializers.HyperlinkedIdentityField):
# the 'url' is not an attribute but rather a self.link, so don't map it here.
continue
if isinstance(field, serializers.HiddenField):
continue
if isinstance(
field,
(
serializers.ManyRelatedField,
ManySerializerMethodResourceRelatedField,
),
):
if field.required:
relationships_required.append(format_field_name(field.field_name))
relationships[format_field_name(field.field_name)] = {
"$ref": "#/components/schemas/reltomany"
}
continue
if isinstance(field, serializers.RelatedField):
if field.required:
relationships_required.append(format_field_name(field.field_name))
relationships[format_field_name(field.field_name)] = {
"$ref": "#/components/schemas/reltoone"
}
continue
if field.field_name == "id":
# ID is always provided in the root of JSON:API and removed from the
# attributes in JSONRenderer.
continue
if field.required:
required.append(format_field_name(field.field_name))
schema = self.map_field(field)
if field.read_only:
schema["readOnly"] = True
if field.write_only:
schema["writeOnly"] = True
if field.allow_null:
schema["nullable"] = True
if field.default and field.default != empty and not callable(field.default):
schema["default"] = field.default
if field.help_text:
# Ensure django gettext_lazy is rendered correctly
schema["description"] = str(field.help_text)
self.map_field_validators(field, schema)
attributes[format_field_name(field.field_name)] = schema
result = {
"type": "object",
"required": ["type", "id"],
"additionalProperties": False,
"properties": {
"type": {"$ref": "#/components/schemas/type"},
"id": {"$ref": "#/components/schemas/id"},
"links": {
"type": "object",
"properties": {"self": {"$ref": "#/components/schemas/link"}},
},
},
}
if attributes:
result["properties"]["attributes"] = {
"type": "object",
"properties": attributes,
}
if required:
result["properties"]["attributes"]["required"] = required
if relationships:
result["properties"]["relationships"] = {
"type": "object",
"properties": relationships,
}
if relationships_required:
result["properties"]["relationships"][
"required"
] = relationships_required
return result
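        # Rough shape of the result for a serializer with a required "name"
        # attribute and a to-one "author" relationship (illustrative, abbreviated):
        #   {"type": "object", "required": ["type", "id"],
        #    "properties": {"type": ..., "id": ..., "links": ...,
        #                   "attributes": {"type": "object",
        #                                  "properties": {"name": {...}},
        #                                  "required": ["name"]},
        #                   "relationships": {"type": "object",
        #                                     "properties": {"author": {
        #                                         "$ref": "#/components/schemas/reltoone"}}}}}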
def _add_async_response(self, operation):
"""
Add async response to operation
"""
operation["responses"]["202"] = {
"description": "Accepted for [asynchronous processing]"
"(https://jsonapi.org/recommendations/#asynchronous-processing)",
"content": {
"application/vnd.api+json": {
"schema": {"$ref": "#/components/schemas/datum"}
}
},
}
def _failure_response(self, reason):
"""
Return failure response reason as the description
"""
return {
"description": reason,
"content": {
"application/vnd.api+json": {
"schema": {"$ref": "#/components/schemas/failure"}
}
},
}
def _add_generic_failure_responses(self, operation):
"""
Add generic failure response(s) to operation
"""
for code, reason in [
("400", "bad request"),
("401", "not authorized"),
]:
operation["responses"][code] = self._failure_response(reason)
def _add_get_4xx_responses(self, operation):
"""
Add generic 4xx GET responses to operation
"""
self._add_generic_failure_responses(operation)
for code, reason in [("404", "not found")]:
operation["responses"][code] = self._failure_response(reason)
def _add_post_4xx_responses(self, operation):
"""
Add POST 4xx error responses to operation
"""
self._add_generic_failure_responses(operation)
for code, reason in [
(
"403",
"[Forbidden](https://jsonapi.org/format/#crud-creating-responses-403)",
),
(
"404",
"[Related resource does not exist]"
"(https://jsonapi.org/format/#crud-creating-responses-404)",
),
(
"409",
"[Conflict](https://jsonapi.org/format/#crud-creating-responses-409)",
),
]:
operation["responses"][code] = self._failure_response(reason)
def _add_patch_4xx_responses(self, operation):
"""
Add PATCH 4xx error responses to operation
"""
self._add_generic_failure_responses(operation)
for code, reason in [
(
"403",
"[Forbidden](https://jsonapi.org/format/#crud-updating-responses-403)",
),
(
"404",
"[Related resource does not exist]"
"(https://jsonapi.org/format/#crud-updating-responses-404)",
),
(
"409",
"[Conflict]([Conflict]"
"(https://jsonapi.org/format/#crud-updating-responses-409)",
),
]:
operation["responses"][code] = self._failure_response(reason)
def _add_delete_responses(self, operation):
"""
Add generic DELETE responses to operation
"""
# the 2xx statuses:
operation["responses"] = {
"200": {
"description": "[OK](https://jsonapi.org/format/#crud-deleting-responses-200)",
"content": {
"application/vnd.api+json": {
"schema": {"$ref": "#/components/schemas/onlymeta"}
}
},
}
}
self._add_async_response(operation)
operation["responses"]["204"] = {
"description": "[no content](https://jsonapi.org/format/#crud-deleting-responses-204)", # noqa: B950
}
# the 4xx errors:
self._add_generic_failure_responses(operation)
for code, reason in [
(
"404",
"[Resource does not exist]"
"(https://jsonapi.org/format/#crud-deleting-responses-404)",
),
]:
operation["responses"][code] = self._failure_response(reason)
|
f77d8973e58d92c20359a97e952c2fb79b6850f2
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/scrapy/utils/display.py
|
f6a6c46454efdc08bdf119ce27818b4e6cc93c35
|
[
"MIT"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 699
|
py
|
display.py
|
"""
pprint and pformat wrappers with colorization support
"""
from __future__ import print_function
import sys
from pprint import pformat as pformat_
def _colorize(text, colorize=True):
if not colorize or not sys.stdout.isatty():
return text
try:
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer
return highlight(text, PythonLexer(), TerminalFormatter())
except ImportError:
return text
def pformat(obj, *args, **kwargs):
return _colorize(pformat_(obj), kwargs.pop('colorize', True))
def pprint(obj, *args, **kwargs):
print(pformat(obj, *args, **kwargs))
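# Example usage (colorization applies only when stdout is a TTY and pygments is
# importable; pass colorize=False to force plain text):
#   pprint({'name': 'scrapy', 'version': 2})
#   text = pformat([1, 2, 3], colorize=False)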
|
2cb4e4b675f537c63864330614db08126ca922d5
|
ad3ae46b6d8de00d434959fa413dd730fce48055
|
/tests/test_project/makemigrations_correct_migration_missing/models.py
|
c15040f8ba01ef78c03d9e544bb2f3498b559b61
|
[
"Apache-2.0"
] |
permissive
|
3YOURMIND/django-migration-linter
|
5f4e5ad1485eca7b740fea7d3f69c3d345448fcd
|
3503367f1fcb34e633577cf70df9ab037d1f394c
|
refs/heads/main
| 2023-08-31T15:36:03.626246
| 2023-07-09T12:43:55
| 2023-07-09T12:43:55
| 87,307,127
| 466
| 62
|
Apache-2.0
| 2023-09-11T09:08:24
| 2017-04-05T12:33:19
|
Python
|
UTF-8
|
Python
| false
| false
| 180
|
py
|
models.py
|
from __future__ import annotations
from django.db import models
class A(models.Model):
x = models.IntegerField()
new_field = models.CharField(null=True, max_length=255)
|
8116f287c8fa31b370a93f20815b51e171274af6
|
acc81cd3c5c9785990479d39706d63fe37417574
|
/keyring/backends/fail.py
|
9bcdfc8094d0bfaa93560a56e610ccf0abd0efe7
|
[
"MIT"
] |
permissive
|
jaraco/keyring
|
bf91a6cbe9b4ce7cfff585a6a9bf7fecbbbc9c5e
|
af7233958ece36da8d493509a6388484f5534dfc
|
refs/heads/main
| 2023-08-08T14:53:16.361761
| 2023-07-23T13:01:17
| 2023-07-23T13:01:17
| 31,262,911
| 1,118
| 193
|
MIT
| 2023-09-13T17:32:33
| 2015-02-24T14:10:21
|
Python
|
UTF-8
|
Python
| false
| false
| 929
|
py
|
fail.py
|
from ..backend import KeyringBackend
from .._compat import properties
from ..errors import NoKeyringError
class Keyring(KeyringBackend):
"""
Keyring that raises error on every operation.
>>> kr = Keyring()
>>> kr.get_password('svc', 'user')
Traceback (most recent call last):
...
keyring.errors.NoKeyringError: ...No recommended backend...
"""
@properties.classproperty
def priority(cls) -> int:
return 0
def get_password(self, service, username, password=None):
msg = (
"No recommended backend was available. Install a recommended 3rd "
"party backend package; or, install the keyrings.alt package if "
"you want to use the non-recommended backends. See "
"https://pypi.org/project/keyring for details."
)
raise NoKeyringError(msg)
set_password = delete_password = get_password # type: ignore
|
8db734d3675dd8282faf5a2108e2bbf434569763
|
1373b5f879da9a9b25b53c16d81de86f2f3b222b
|
/banding_removal/fastmri/checkpointing_mixin.py
|
ed9e021aa1e226420dade11e6f4412a1cdd1d997
|
[
"MIT"
] |
permissive
|
facebookresearch/fastMRI
|
316daf77f53c61cd297aa8cb3509d002a8300648
|
84ab6e57748c2631b0e780255dcbc1d6e372269d
|
refs/heads/main
| 2023-09-05T09:54:25.653593
| 2023-06-26T17:03:09
| 2023-06-26T17:03:09
| 154,900,564
| 1,174
| 400
|
MIT
| 2023-06-26T17:03:10
| 2018-10-26T22:21:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,412
|
py
|
checkpointing_mixin.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import signal
import sys
from subprocess import call, Popen, PIPE
import torch
import logging
import pickle
import pdb
from pathlib import Path
import time
def remove_prefix_from_model(model_state_dict):
prefix = next(iter(model_state_dict.keys())).split('.', 1)[0]
if all(k.startswith(prefix) for k in model_state_dict.keys()):
logging.info(f"Removing model prefix '{prefix}' from checkpoint")
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in model_state_dict.items():
            name = k.split('.', 1)[1]  # drop the leading prefix, e.g. 'module.'
new_state_dict[name] = v
return new_state_dict
else:
return model_state_dict
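# For example, a checkpoint saved from a DataParallel-wrapped model has keys like
# 'module.encoder.weight'; when every key shares that first dotted component, it
# is stripped so the weights load into the unwrapped model ('encoder.weight').
# Otherwise the state dict is returned unchanged.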
class CheckpointingMixin(object):
"""
Follows Slurm checkpointing advice in:
https://fb.quip.com/Ov7NA1FD3mmx
"""
def initial_setup(self, args):
super().initial_setup(args)
self.runinfo_path = self.exp_dir / "current_runinfo.pkl"
self.model_path = self.exp_dir / 'current_model.mdl'
if args.checkpoint_type == "resume":
logging.info(f"RESUMING from checkpoint {args.checkpoint}")
self.checkpoint = torch.load(args.checkpoint)
if args.checkpoint_type == "restart":
logging.info(f"RESTARTING from checkpoint {args.checkpoint}")
self.checkpoint = torch.load(args.checkpoint)
else:
# Check for partial job as well
if self.model_path.exists() and args.auto_requeue:
logging.info("")
logging.info(f"FOUND model file in working directory, resuming ...")
logging.info("")
self.checkpoint = torch.load(self.model_path)
args.checkpoint_type = "resume"
# Set up signal handler to detect Slurm signals
if args.auto_requeue: #and 'SLURM_NODEID' in os.environ:
signal.signal(signal.SIGUSR1, self.requeueHandler)
signal.signal(signal.SIGTERM, self.termHandler)
logging.info('Signal handler installed for automatic requeuing')
self.MAIN_PID = os.getpid()
self.HALT_filename = 'HALT'
self.SIGNAL_RECEIVED = False
        '''The HALT file is used as a sign of job completion.
        Make sure no HALT file is left over from previous runs.
        '''
if os.path.isfile(self.HALT_filename):
os.remove(self.HALT_filename)
def model_setup(self, args):
super().model_setup(args)
if args.checkpoint_type == "resume" or args.checkpoint_type == "restart":
model_state_dict = self.checkpoint['model']
#model_state_dict = remove_prefix_from_model(self.checkpoint['model'])
# Should be compatible with models that do and do not use dataparallel
try:
self.model.load_state_dict(model_state_dict, strict=True)
            except Exception:
for _ in range(len(model_state_dict)):
k, v = model_state_dict.popitem(False)
idx = k.find('module.')
if idx >= 0:
newkey = k[:idx] + k[idx+len('module.'):]
model_state_dict[newkey] = v
else:
model_state_dict[k] = v
self.model.load_state_dict(model_state_dict, strict=True)
def optimizer_setup(self, args):
super().optimizer_setup(args)
if args.checkpoint_type == "resume":
self.optimizer.load_state_dict(self.checkpoint['optimizer'])
def runinfo_setup(self, args):
super().runinfo_setup(args)
if args.checkpoint_type == "resume":
self.runinfo = self.checkpoint["runinfo"]
# at_epoch is last epoch completed (unless at beginning), so start next one.
if len(self.runinfo["dev_losses"]) > 0:
self.runinfo["at_epoch"] += 1
del self.checkpoint # keep memory usage down
else:
# Save before first epoch so we can resume if preempted early
self.save_info()
self.save_model()
def end_of_epoch_hook(self, epoch):
super().end_of_epoch_hook(epoch)
self.save_info()
self.save_model()
def save_info(self):
if not self.args.save_info or self.args.rank != 0:
return
with open(self.runinfo_path, 'wb') as output:
pickle.dump(self.runinfo, output)
logging.info(f"Saved runinfo {self.runinfo_path.resolve()}")
def save_model(self):
if not self.args.save_model or self.args.rank != 0:
return
logging.debug("Saving model ...")
# Avoid corruption if we crash during save by saving to a tmp and then moving
tmp_model_path = self.model_path.with_suffix(".mdl.tmp")
torch.save(self.serialize(), f = tmp_model_path)
tmp_model_path.replace(self.model_path)
logging.info(f"Saved model {self.model_path.resolve()}")
def termHandler(self, signum, frame):
"""
Slurm preemption sends a SIGTERM before the SIGUSR1, to give you a warning
that the process is going to be preempted. This needs to be caught, otherwise
the process will exit early.
"""
print("SIGTERM caught and ignored", flush=True)
def requeueHandler(self, signum, frame):
"""
A USR1 signal is sent by slurm if the timelimit of the job is reached
or if the job is about to be preempted
"""
args = self.args
print('Signal received', signum, time.time(), flush=True)
self.SIGNAL_RECEIVED = True
if os.path.isfile(self.HALT_filename):
print('Job is done, exiting', flush=True)
exit(0)
def trigger_job_requeue(self):
""" Submit a new job to resume from checkpoint.
No need to checkpoint model or runinfo here since we don't
currently support resuming from specific batches/iterations
(only from epochs)
"""
if self.args.rank == 0:
### This ensures that only the main processes (rank 0) requeues the job
print('Time is up, back to SLURM queue', flush=True)
command = 'scontrol requeue ' + os.environ['SLURM_JOB_ID']
print(command)
if os.system(command):
raise RuntimeError('requeue failed')
print('Job successfully requeued', flush=True)
else:
print(f'Non-primary process {os.getpid()} waiting for requeue', flush=True)
if self.args.is_distributed:
self.barrier()
logging.info(f"requeue synced")
exit(0)
def start_of_batch_hook(self, progress, logging_epoch):
if self.args.auto_requeue and self.SIGNAL_RECEIVED:
self.trigger_job_requeue()
super().start_of_batch_hook(progress, logging_epoch)
def start_of_test_batch_hook(self, progress, logging_epoch):
if self.args.auto_requeue and self.SIGNAL_RECEIVED:
self.trigger_job_requeue()
super().start_of_test_batch_hook(progress, logging_epoch)
|
88572202125ef95326679e62ebcbf0c9fcabdf09
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/volume/tests/test_macadam_limits.py
|
fa7cdd3d03eda6bfea38fcb3138e76301116325e
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
test_macadam_limits.py
|
#!/usr/bin/env python
"""Define the unit tests for the :mod:`colour.volume.macadam_limits` module."""
import numpy as np
import unittest
from itertools import product
from colour.volume import is_within_macadam_limits
from colour.utilities import ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestIsWithinMacadamLimits",
]
class TestIsWithinMacadamLimits(unittest.TestCase):
"""
Define :func:`colour.volume.macadam_limits.is_within_macadam_limits`
definition unit tests methods.
"""
def test_is_within_macadam_limits(self):
"""
Test :func:`colour.volume.macadam_limits.is_within_macadam_limits`
definition.
"""
self.assertTrue(
is_within_macadam_limits(np.array([0.3205, 0.4131, 0.5100]), "A")
)
self.assertFalse(
is_within_macadam_limits(np.array([0.0005, 0.0031, 0.0010]), "A")
)
self.assertTrue(
is_within_macadam_limits(np.array([0.4325, 0.3788, 0.1034]), "C")
)
self.assertFalse(
is_within_macadam_limits(np.array([0.0025, 0.0088, 0.0340]), "C")
)
def test_n_dimensional_is_within_macadam_limits(self):
"""
Test :func:`colour.volume.macadam_limits.is_within_macadam_limits`
definition n-dimensional arrays support.
"""
a = np.array([0.3205, 0.4131, 0.5100])
b = is_within_macadam_limits(a, "A")
a = np.tile(a, (6, 1))
b = np.tile(b, 6)
np.testing.assert_array_almost_equal(
is_within_macadam_limits(a, "A"), b
)
a = np.reshape(a, (2, 3, 3))
b = np.reshape(b, (2, 3))
np.testing.assert_array_almost_equal(
is_within_macadam_limits(a, "A"), b
)
@ignore_numpy_errors
def test_nan_is_within_macadam_limits(self):
"""
Test :func:`colour.volume.macadam_limits.is_within_macadam_limits`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = np.array(list(set(product(cases, repeat=3))))
is_within_macadam_limits(cases, "A")
if __name__ == "__main__":
unittest.main()
|
05ecb5610d92cd3a6b86d26bcb1748e972766cd6
|
c3e0a6919caf85c35239ef23084df9bbf8dd61c3
|
/pypeit/tests/test_procimg.py
|
d04fe98f350a61da587796b405349d202f8f14e7
|
[
"BSD-3-Clause"
] |
permissive
|
pypeit/PypeIt
|
6eb9e5afd62acc9d363e497cd9e367d620f86ea4
|
0d2e2196afc6904050b1af4d572f5c643bb07e38
|
refs/heads/release
| 2023-08-25T21:15:59.113114
| 2023-06-04T15:23:39
| 2023-06-04T15:23:39
| 36,958,428
| 136
| 98
|
BSD-3-Clause
| 2023-09-12T17:42:15
| 2015-06-05T22:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,348
|
py
|
test_procimg.py
|
"""
Module to run tests on core.procimg functions.
"""
import os
from IPython import embed
import numpy as np
from astropy.convolution import convolve
from pypeit.core import procimg
from pypeit import utils
def test_replace_columns():
y = np.zeros((10,3), dtype=float)
y[:,2] = 2
bad_col = np.array([False, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
procimg.replace_columns(y, bad_col, copy=True, replace_with='linear')), \
'Interpolation and mean should provide the same result.'
bad_col = np.array([False, True, True])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.zeros_like(y)), 'Should set everything to 0.'
bad_col = np.array([True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.full_like(y, 2)), 'Should set everything to 2.'
y = np.zeros((10,4), dtype=float)
y[:,3] = 3
bad_col = np.array([False, True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='linear'),
np.repeat(np.arange(4),10).reshape(4,10).T), \
'Interpolation failed.'
def test_rn2_frame():
# Bogus image
datasec = np.ones((10,10), dtype=int)
datasec[5:] = 2
rn = np.array([2.5, 3.5])
gain = np.array([1.2, 1.5])
rnvar = procimg.rn2_frame(datasec, rn, digitization=False)
assert rnvar.shape == datasec.shape, 'Shape mismatch'
assert np.array_equal(np.unique(rnvar), rn**2), 'Bad RN variance calculation'
rnvar = procimg.rn2_frame(datasec, rn, units='ADU', gain=gain, digitization=False)
assert np.allclose(np.unique(rnvar), (rn/gain)**2), 'Bad RN variance calculation'
def test_sub_overscan():
datasec = np.zeros((10,10), dtype=int)
datasec[:5,:-3] = 1
datasec[5:,:-3] = 2
oscan = np.zeros((10,10), dtype=int)
oscan[:5,-3:] = 1
oscan[5:,-3:] = 2
raw = np.zeros((10,10), dtype=float)
raw[datasec == 1] = 10.
raw[datasec == 2] = 20.
raw[oscan == 1] = 9.
raw[oscan == 2] = 19.
raw_sub, _ = procimg.subtract_overscan(raw, datasec, oscan, method='median')
assert np.array_equal(raw_sub[datasec > 0], np.ones(np.sum(datasec > 0), dtype=float)), \
'Bad overscan subtraction'
var = np.ones((10,10), dtype=float)
raw_sub, var_sub = procimg.subtract_overscan(raw, datasec, oscan, method='median', var=var)
assert np.array_equal(var_sub[datasec > 0],
np.full(np.sum(datasec > 0), np.pi/2/15, dtype=float)), \
'Bad variance calculation'
def test_trim():
datasec = np.zeros((10,10), dtype=int)
datasec[:5,:-3] = 1
datasec[5:,:-3] = 2
_datasec = procimg.trim_frame(datasec, datasec < 1)
assert _datasec.shape == (10,7), 'Trimming error'
assert np.array_equal(datasec[datasec > 0], _datasec.flat), 'Values changed'
def test_var_model():
# Bogus image
datasec = np.ones((10,10), dtype=int)
datasec[5:] = 2
rn = np.array([2.5, 3.5])
rnvar = procimg.rn2_frame(datasec, rn)
assert np.array_equal(rnvar, procimg.base_variance(rnvar)), \
'Variance model with only rnvar is just rnvar'
counts = np.full(rnvar.shape, 10., dtype=float)
assert np.array_equal(rnvar, procimg.variance_model(rnvar)), \
'Variance model should just return input if no optional parameters are provided.'
base = procimg.base_variance(rnvar, darkcurr=10.)
base_t = procimg.base_variance(rnvar, darkcurr=5., exptime=2.*3600)
assert np.all(procimg.variance_model(rnvar, counts=counts) > rnvar), \
'Shot noise should increase the variance'
assert np.all(procimg.variance_model(base, counts=counts) > base), \
'Shot noise should increase the variance'
assert np.array_equal(
procimg.variance_model(base, counts=counts),
procimg.variance_model(base_t, counts=counts)), \
'Dark current should be equivalent'
assert np.all(procimg.base_variance(rnvar, proc_var=10.) > rnvar), \
'Processing variance should increase the total variance'
assert np.all(procimg.variance_model(rnvar, counts=counts, count_scale=0.5) <
procimg.variance_model(rnvar, counts=counts)), \
'Scaling should have decreased the noise.'
assert np.all(procimg.variance_model(rnvar, counts=counts, noise_floor=0.1) > rnvar), \
'Noise floor should have increased the variance.'
def test_grow_mask():
mask = np.zeros((9,9), dtype=bool)
mask[4,4] = True
grw_msk = procimg.grow_mask(mask, 2.)
_grw_msk = np.zeros((9,9), dtype=bool)
_grw_msk[3:-3,3] = True
_grw_msk[2:-2,4] = True
_grw_msk[3:-3,5] = True
_grw_msk[3,3:-3] = True
_grw_msk[4,2:-2] = True
_grw_msk[5,3:-3] = True
assert np.array_equal(grw_msk, _grw_msk), 'Bad mask growth'
def test_boxcar():
a = np.arange(100).reshape(10,10).astype(float)
arep = procimg.boxcar_replicate(a, 2)
assert np.array_equal(procimg.boxcar_average(arep, 2), a), 'Bad replicate/average'
assert np.array_equal(utils.rebin_evlist(arep, a.shape), a), 'Bad replicate/rebin'
|
efaa093850eff3ef8462c310bb5407fcf6184943
|
915d6cd33ed4293d83a15a2a03bd126a1f03fc97
|
/xknx/remote_value/remote_value_climate_mode.py
|
5f688bbe9ccd94ddd2351c6461970a285b261f18
|
[
"MIT"
] |
permissive
|
XKNX/xknx
|
5e02e3588ab8b2a4dcd7895b94cd39c2894070a8
|
48d4e31365c15e632b275f0d129cd9f2b2b5717d
|
refs/heads/main
| 2023-09-02T11:18:18.093379
| 2023-08-28T11:06:58
| 2023-08-28T11:06:58
| 51,259,458
| 248
| 131
|
MIT
| 2023-09-11T11:54:55
| 2016-02-07T18:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 10,318
|
py
|
remote_value_climate_mode.py
|
"""
Module for managing climate mode remote values.
DPT .
"""
from __future__ import annotations
from abc import abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional
from xknx.dpt import (
DPTArray,
DPTBinary,
DPTControllerStatus,
DPTHVACContrMode,
DPTHVACMode,
)
from xknx.dpt.dpt_hvac_mode import HVACControllerMode, HVACModeT, HVACOperationMode
from xknx.exceptions import ConversionError, CouldNotParseTelegram
from .remote_value import AsyncCallbackType, GroupAddressesType, RemoteValue
if TYPE_CHECKING:
from xknx.xknx import XKNX
class RemoteValueClimateModeBase(RemoteValue[Optional[HVACModeT]]):
"""Base class for binary climate mode remote values."""
@abstractmethod
def supported_operation_modes(
self,
) -> list[HVACModeT]:
"""Return a list of all supported operation modes."""
class RemoteValueOperationMode(RemoteValueClimateModeBase[HVACOperationMode]):
"""Abstraction for remote value of KNX climate operation modes."""
class ClimateModeType(Enum):
"""Implemented climate mode types."""
CONTROLLER_STATUS = DPTControllerStatus
HVAC_MODE = DPTHVACMode
def __init__(
self,
xknx: XKNX,
group_address: GroupAddressesType | None = None,
group_address_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
device_name: str | None = None,
feature_name: str = "Climate mode",
climate_mode_type: ClimateModeType | None = None,
after_update_cb: AsyncCallbackType | None = None,
):
"""Initialize remote value of KNX climate mode."""
super().__init__(
xknx,
group_address=group_address,
group_address_state=group_address_state,
sync_state=sync_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
)
if not isinstance(climate_mode_type, self.ClimateModeType):
raise ConversionError(
"invalid climate mode type",
climate_mode_type=str(climate_mode_type),
device_name=str(device_name),
feature_name=feature_name,
)
self._climate_mode_transcoder: (
DPTControllerStatus | DPTHVACMode
) = climate_mode_type.value
def supported_operation_modes(self) -> list[HVACOperationMode]:
"""Return a list of all supported operation modes."""
return list(self._climate_mode_transcoder.SUPPORTED_MODES.values())
def to_knx(self, value: Any) -> DPTArray:
"""Convert value to payload."""
return self._climate_mode_transcoder.to_knx(value)
def from_knx(self, payload: DPTArray | DPTBinary) -> HVACOperationMode | None:
"""Convert current payload to value."""
return self._climate_mode_transcoder.from_knx(payload)
class RemoteValueControllerMode(RemoteValueClimateModeBase[HVACControllerMode]):
"""Abstraction for remote value of KNX climate controller modes."""
def __init__(
self,
xknx: XKNX,
group_address: GroupAddressesType | None = None,
group_address_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
device_name: str | None = None,
feature_name: str = "Controller Mode",
after_update_cb: AsyncCallbackType | None = None,
):
"""Initialize remote value of KNX climate mode."""
super().__init__(
xknx,
group_address=group_address,
group_address_state=group_address_state,
sync_state=sync_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
)
def supported_operation_modes(self) -> list[HVACControllerMode]:
"""Return a list of all supported operation modes."""
return list(DPTHVACContrMode.SUPPORTED_MODES.values())
def to_knx(self, value: Any) -> DPTArray:
"""Convert value to payload."""
return DPTHVACContrMode.to_knx(value)
def from_knx(self, payload: DPTArray | DPTBinary) -> HVACControllerMode | None:
"""Convert current payload to value."""
return DPTHVACContrMode.from_knx(payload)
class RemoteValueBinaryOperationMode(RemoteValueClimateModeBase[HVACOperationMode]):
"""Abstraction for remote value of split up KNX climate modes."""
def __init__(
self,
xknx: XKNX,
group_address: GroupAddressesType | None = None,
group_address_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
device_name: str | None = None,
feature_name: str = "Climate mode binary",
after_update_cb: AsyncCallbackType | None = None,
operation_mode: HVACOperationMode | None = None,
):
"""Initialize remote value of KNX DPT 1 representing a climate operation mode."""
if not isinstance(operation_mode, HVACOperationMode):
raise ConversionError(
"Invalid operation mode type",
operation_mode=str(operation_mode),
device_name=str(device_name),
feature_name=feature_name,
)
if operation_mode not in self.supported_operation_modes():
raise ConversionError(
"Operation mode not supported for binary mode object",
operation_mode=str(operation_mode),
device_name=str(device_name),
feature_name=feature_name,
)
self.operation_mode = operation_mode
super().__init__(
xknx,
group_address=group_address,
group_address_state=group_address_state,
sync_state=sync_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
)
def to_knx(self, value: Any) -> DPTBinary:
"""Convert value to payload."""
if isinstance(value, HVACOperationMode):
# foreign operation modes will set the RemoteValue to False
return DPTBinary(value == self.operation_mode)
raise ConversionError(
"value invalid",
value=value,
device_name=self.device_name,
feature_name=self.feature_name,
)
def supported_operation_modes(self) -> list[HVACOperationMode]:
"""Return a list of the configured operation mode."""
return [
HVACOperationMode.COMFORT,
HVACOperationMode.FROST_PROTECTION,
HVACOperationMode.NIGHT,
HVACOperationMode.STANDBY,
]
def from_knx(self, payload: DPTArray | DPTBinary) -> HVACOperationMode | None:
"""Convert current payload to value."""
if payload.value == 1:
return self.operation_mode
if payload.value == 0:
return None
raise CouldNotParseTelegram(
"Payload invalid",
payload=str(payload),
device_name=self.device_name,
feature_name=self.feature_name,
)
class RemoteValueBinaryHeatCool(RemoteValueClimateModeBase[HVACControllerMode]):
"""Abstraction for remote value of heat/cool controller mode."""
def __init__(
self,
xknx: XKNX,
group_address: GroupAddressesType | None = None,
group_address_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
device_name: str | None = None,
feature_name: str = "Controller mode Heat/Cool",
after_update_cb: AsyncCallbackType | None = None,
controller_mode: HVACControllerMode | None = None,
):
"""Initialize remote value of KNX DPT 1 representing a climate controller mode."""
if not isinstance(controller_mode, HVACControllerMode):
raise ConversionError(
"Invalid controller mode type",
controller_mode=str(controller_mode),
device_name=str(device_name),
feature_name=feature_name,
)
if controller_mode not in self.supported_operation_modes():
raise ConversionError(
"Controller mode not supported for binary mode object",
controller_mode=str(controller_mode),
device_name=str(device_name),
feature_name=feature_name,
)
self.controller_mode = controller_mode
super().__init__(
xknx,
group_address=group_address,
group_address_state=group_address_state,
sync_state=sync_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
)
def supported_operation_modes(self) -> list[HVACControllerMode]:
"""Return a list of the configured operation mode."""
return [HVACControllerMode.HEAT, HVACControllerMode.COOL]
def to_knx(self, value: Any) -> DPTBinary:
"""Convert value to payload."""
if isinstance(value, HVACControllerMode):
# foreign operation modes will set the RemoteValue to False
return DPTBinary(value == self.controller_mode)
raise ConversionError(
"value invalid",
value=value,
device_name=self.device_name,
feature_name=self.feature_name,
)
def from_knx(self, payload: DPTArray | DPTBinary) -> HVACControllerMode | None:
"""Convert current payload to value."""
if payload.value == 1:
return self.controller_mode
if payload.value == 0:
# return the other operation mode
return next(
(
_op
for _op in self.supported_operation_modes()
if _op is not self.controller_mode
),
None,
)
raise CouldNotParseTelegram(
"Payload invalid",
payload=str(payload),
device_name=self.device_name,
feature_name=self.feature_name,
)
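# Usage sketch (assumes the xknx package is installed; the group address is
# illustrative): a binary heat/cool object encodes its configured mode as a
# DPT 1 payload, and a 0 payload decodes to the other configured mode.
if __name__ == "__main__":
    from xknx import XKNX

    xknx = XKNX()
    heat_cool = RemoteValueBinaryHeatCool(
        xknx, group_address="1/2/3", controller_mode=HVACControllerMode.HEAT
    )
    assert heat_cool.to_knx(HVACControllerMode.HEAT).value == 1
    assert heat_cool.from_knx(DPTBinary(0)) == HVACControllerMode.COOL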
|
805a0277497141b74d439b00bca4053f566e3670
|
8e6146819d50f3f6a0a725036f36f261ebbc2951
|
/derl/envs/tasks/manipulation.py
|
ce0f11cb66a21e5cf9da5e21e1394ee14a899c0d
|
[] |
no_license
|
agrimgupta92/derl
|
54399730fe4de08a958ec847b34e0c036a9f30fa
|
91e077e355b27bb74fa920c708d606ff9f4d611e
|
refs/heads/main
| 2023-08-21T19:20:05.150847
| 2021-10-09T19:13:05
| 2021-10-09T19:13:05
| 414,080,496
| 107
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,005
|
py
|
manipulation.py
|
import numpy as np
from gym import utils
from scipy.spatial import distance as scipy_distance
import derl.utils.mjpy as mu
from derl.config import cfg
from derl.envs.modules.agent import Agent
from derl.envs.modules.objects import Objects
from derl.envs.modules.terrain import Terrain
from derl.envs.tasks.unimal import UnimalEnv
from derl.envs.wrappers.hfield import StandReward
from derl.envs.wrappers.hfield import TerminateOnFalling
from derl.envs.wrappers.hfield import UnimalHeightObs
from derl.envs.wrappers.metrics import ManipulationMetric
class ManipulationTask(UnimalEnv, utils.EzPickle):
def __init__(self, xml_str, unimal_id):
UnimalEnv.__init__(self, xml_str, unimal_id)
self.obj_type = cfg.OBJECT.TYPE
self.obj_name = "{}/1".format(self.obj_type)
def _cal_agent_obj_dist(self):
agent_pos = [
self.sim.data.get_site_xpos(agent_site).copy()
for agent_site in self.metadata["agent_sites"]
]
if self.obj_type == "box":
obj_pos = [
self.sim.data.get_site_xpos(obj_site).copy()
for obj_site in self.metadata["object_sites"]
]
else:
obj_pos = [
self.sim.data.get_body_xpos(self.obj_name).copy()
]
distance = scipy_distance.cdist(agent_pos, obj_pos, "euclidean")
return np.min(distance)
###########################################################################
# Sim step and reset
###########################################################################
def step(self, action):
agent_obj_d_before = self._cal_agent_obj_dist()
obj_pos_before = self.sim.data.get_body_xpos(self.obj_name)[:2].copy()
self.do_simulation(action)
xy_pos_after = self.sim.data.get_body_xpos("torso/0")[:2].copy()
agent_obj_d_after = self._cal_agent_obj_dist()
obj_pos_after = self.sim.data.get_body_xpos(self.obj_name)[:2].copy()
# Reward given to agent to reach/or be near to obj
reach_reward = (agent_obj_d_before - agent_obj_d_after) * 100.0
if (
agent_obj_d_after <= cfg.OBJECT.SUCCESS_MARGIN
and not self.reached_obj
):
reach_reward += 10.0
self.reached_obj = True
# Reward given to agent to "push" obj to be near to goal
push_reward = 0.0
goal_pos = self.modules["Objects"].goal_pos[:2]
obj_goal_d_after = None
agent_goal_d_after = None
if self.reached_obj:
obj_goal_d_before = np.linalg.norm(goal_pos - obj_pos_before)
obj_goal_d_after = np.linalg.norm(goal_pos - obj_pos_after)
push_reward = (obj_goal_d_before - obj_goal_d_after) * 100.0
if obj_goal_d_after <= cfg.OBJECT.SUCCESS_MARGIN:
push_reward += 10.0
self.reach_goal_obj = True
agent_goal_d_after = np.linalg.norm(xy_pos_after - goal_pos)
if agent_goal_d_after <= cfg.OBJECT.SUCCESS_MARGIN:
self.reach_goal_agent = True
ctrl_cost = self.control_cost(action)
reward = reach_reward + push_reward - ctrl_cost
observation = self._get_obs()
info = {
"x_pos": xy_pos_after[0],
"y_pos": xy_pos_after[1],
"__reward__reach": reach_reward,
"__reward__push": push_reward,
"__reward__energy": self.calculate_energy(),
"__reward__ctrl": ctrl_cost,
"__reward__manipulation": reach_reward + push_reward,
"agent_obj_d_after": agent_obj_d_after,
"agent_goal_d_after": agent_goal_d_after,
"goal_pos": np.asarray(goal_pos),
"reached_obj": self.reached_obj,
"reach_goal_obj": self.reach_goal_obj,
"reach_goal_agent": self.reach_goal_agent,
"init_obj_goal_d": self.init_obj_goal_d,
"obj_goal_d_after": obj_goal_d_after,
}
# Update viewer with markers, if any
if self.viewer is not None:
self.viewer._markers[:] = []
for marker in self.metadata["markers"]:
self.viewer.add_marker(**marker)
return observation, reward, False, info
def reset(self):
obs = super().reset()
self.reached_obj = False
self.reach_goal_agent = False
self.reach_goal_obj = False
self.init_obj_goal_d = np.linalg.norm(
np.asarray(self.modules["Objects"].goal_pos[:2])
- np.asarray(self.sim.data.get_body_xpos(self.obj_name)[:2])
)
return obs
def make_env_manipulation(xml, unimal_id):
env = ManipulationTask(xml, unimal_id)
# Add modules
for module in cfg.ENV.MODULES:
env.add_module(globals()[module])
env.reset()
# Add all wrappers
env = UnimalHeightObs(env)
env = StandReward(env)
env = TerminateOnFalling(env)
env = ManipulationMetric(env)
return env
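# Toy illustration of the reach-reward shaping used in step() above, detached
# from MuJoCo: the agent earns the scaled decrease in agent-object distance,
# plus a one-time bonus when it first comes within the success margin.
# SUCCESS_MARGIN below is a stand-in for cfg.OBJECT.SUCCESS_MARGIN.
import numpy as np

SUCCESS_MARGIN = 0.5

def _reach_reward(d_before, d_after, reached_obj):
    reward = (d_before - d_after) * 100.0
    if d_after <= SUCCESS_MARGIN and not reached_obj:
        reward += 10.0
        reached_obj = True
    return reward, reached_obj

_r, _reached = _reach_reward(1.0, 0.4, False)
assert np.isclose(_r, 0.6 * 100.0 + 10.0) and _reached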
|
d7a597d9aa575f41d501f194baeacb56167175af
|
e04a5b20f946c5033f24d4dd8acda395a98747c5
|
/tableau-export/python-lib/tableau_utils.py
|
c464b4ceb8089a39f920a9ff5500ad05b6992251
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
dataiku/dataiku-contrib
|
2a2f2fb420d7f2ab49b82d80659cc6f6ec1d8f61
|
9a9f189e8a544a81c205d8a8b3779d4517b88653
|
refs/heads/master
| 2023-09-04T03:33:58.625093
| 2023-04-26T08:17:34
| 2023-04-26T08:17:34
| 45,074,604
| 103
| 94
|
Apache-2.0
| 2023-06-08T21:29:07
| 2015-10-27T22:41:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,269
|
py
|
tableau_utils.py
|
# -*- coding: utf-8 -*-
from tableausdk import *
from tableausdk.Extract import *
from tableausdk.Server import *
# Because of a bug in Tableau, we need to load Tableau before pandas.
# See http://community.tableau.com/thread/156790 for more info
import dataiku
import os
from dataiku.customrecipe import *
from datetime import datetime
typeMap = {
'tinyint': Type.INTEGER,
'smallint':Type.INTEGER,
'int': Type.INTEGER,
'bigint': Type.INTEGER,
'float': Type.DOUBLE,
'double': Type.DOUBLE,
'boolean': Type.BOOLEAN,
'string': Type.UNICODE_STRING,
'date': Type.DATETIME,
'array': Type.UNICODE_STRING,
'map': Type.UNICODE_STRING,
'object': Type.UNICODE_STRING
}
def convert_type(type):
return typeMap.get(type,Type.UNICODE_STRING)
fieldSetterMap = {
'boolean': lambda row, col, val: row.setBoolean(col, val),
'tinyint': lambda row, col, val: row.setInteger(col, int(val)),
'smallint': lambda row, col, val: row.setInteger(col, int(val)),
'int': lambda row, col, val: row.setInteger(col, int(val)),
'bigint': lambda row, col, val: row.setInteger(col, int(val)),
'float': lambda row, col, val: row.setDouble (col, float(val)),
'double': lambda row, col, val: row.setDouble (col, float(val)),
'date': lambda row, col, val: row.setDateTime(col, val.year, val.month, val.day, val.hour, val.minute, val.second, val.microsecond),
'string': lambda row, col, val: row.setString(col, val),
'array': lambda row, col, val: row.setString(col, val),
'map': lambda row, col, val: row.setString(col, val),
'object': lambda row, col, val: row.setString(col, val),
}
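# Illustration of the dispatch pattern above (not part of the plugin): each
# DSS type maps to a setter that calls the matching typed Row method. A stub
# stands in for the Tableau SDK Row here.
class _StubRow(object):
    def __init__(self):
        self.cells = {}
    def setInteger(self, col, val):
        self.cells[col] = val
    def setString(self, col, val):
        self.cells[col] = val

_row = _StubRow()
fieldSetterMap['int'](_row, 0, '42')      # coerced to int by the setter
fieldSetterMap['string'](_row, 1, 'abc')
assert _row.cells == {0: 42, 1: 'abc'}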
def makeTableDefinition(schema):
# todo partition
tableDef = TableDefinition()
tableDef.setDefaultCollation(Collation.EN_GB)
for col in schema:
tableDef.addColumn(col['name'], convert_type(col['type']))
return tableDef
def insertData(dataset_in, table_out):
tableDef = table_out.getTableDefinition()
schema = dataset_in.read_schema()
nbCol = len(schema)
for input_row in dataset_in.iter_rows(log_every=10000):
output_row = Row(tableDef)
for colNo in range(nbCol):
data = input_row[schema[colNo]['name']]
try:
fieldSetterMap[schema[colNo]['type']](output_row, colNo, data)
except Exception, e:
print "Failed setting field %s to value %s: %s" % (schema[colNo]["name"], data, e)
pass
table_out.insert(output_row)
def output_filename():
result = get_recipe_config().get("tde_file_name", "output.tde")
if not result.endswith('.tde'):
result = result + '.tde'
return result
def tde_export():
print "Start export to TDE"
input_name = get_input_names_for_role('input')[0]
input_dataset = dataiku.Dataset(input_name)
input_schema = input_dataset.read_schema()
partitions = input_dataset.list_partitions(raise_if_empty=False)
if partitions not in [[], [u'NP']]:
raise Exception("Due to the current APIs, this plugin cannot support partitioned input "
"(and it seems the input dataset " +input_name+ " is partitioned). "
"A workaround is to first run a sync recipe "
"from " +input_name+ " into a non partitioned dataset, "
"then take the latter as input for tde export.")
output_name = get_output_names_for_role('output_folder')[0]
output_folder = dataiku.Folder(output_name)
output_path = output_folder.get_path()
os.chdir(output_path)
# Clean output dir. We assume there is no subfolder.
# (because this recipe never creates one. If there is, better fail than remove someone else's data)
for file in os.listdir(output_path):
os.remove(file)
ExtractAPI.initialize()
with Extract(output_filename()) as extract:
assert(not extract.hasTable('Extract'))
tableDef = makeTableDefinition(input_schema)
table = extract.addTable('Extract', tableDef)
insertData(input_dataset, table)
extract.close()
ExtractAPI.cleanup()
print "End export to TDE"
def upload():
print "Start upload to Tableau server"
ServerAPI.initialize()
serverConnection = ServerConnection()
proxyUsername = get_recipe_config().get("proxy_username",'')
proxyPassword = get_recipe_config().get("proxy_password",'')
if proxyUsername != '':
serverConnection.setProxyCredentials(proxyUsername, proxyPassword)
serverConnection.connect(
get_recipe_config()['server_url'],
get_recipe_config()['username'],
get_recipe_config().get('password',''),
get_recipe_config().get('site_id','') );
project = get_recipe_config().get('project','default')
if project == '':
project = 'default'
output_table = get_recipe_config().get('output_table','DSS_extract')
if output_table == '':
output_table = 'DSS_extract'
serverConnection.publishExtract(
output_filename(),
project,
output_table,
True ); # overwrite
serverConnection.disconnect();
serverConnection.close();
ServerAPI.cleanup();
print "End upload to Tableau server"
|
cce133fef0216cc0bd50fdcfb3ac36a0a72403b3
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/espnet2/spk/pooling/chn_attn_stat_pooling.py
|
4bc1678fa7efa4b82d2a6d3747ad44c2cb368379
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
chn_attn_stat_pooling.py
|
import torch
import torch.nn as nn
from espnet2.spk.pooling.abs_pooling import AbsPooling
class ChnAttnStatPooling(AbsPooling):
"""
    Aggregates frame-level features into a single utterance-level feature.
    Proposed in B. Desplanques et al., "ECAPA-TDNN: Emphasized Channel
    Attention, Propagation and Aggregation in TDNN Based Speaker Verification"
args:
input_size: dimensionality of the input frame-level embeddings.
Determined by encoder hyperparameter.
    For this pooling layer, the output dimensionality will be double the
    input_size.
"""
def __init__(self, input_size: int = 1536):
super().__init__()
self.attention = nn.Sequential(
nn.Conv1d(input_size * 3, 128, kernel_size=1),
nn.ReLU(),
nn.BatchNorm1d(128),
nn.Conv1d(128, input_size, kernel_size=1),
nn.Softmax(dim=2),
)
def forward(self, x):
t = x.size()[-1]
global_x = torch.cat(
(
x,
torch.mean(x, dim=2, keepdim=True).repeat(1, 1, t),
torch.sqrt(
torch.var(x, dim=2, keepdim=True).clamp(min=1e-4, max=1e4)
).repeat(1, 1, t),
),
dim=1,
)
w = self.attention(global_x)
mu = torch.sum(x * w, dim=2)
sg = torch.sqrt(
(torch.sum((x**2) * w, dim=2) - mu**2).clamp(min=1e-4, max=1e4)
)
x = torch.cat((mu, sg), dim=1)
return x
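# Usage sketch (illustrative; torch is already imported above): the pooling
# maps (batch, feats, frames) frame-level features to (batch, 2 * feats)
# utterance-level vectors -- the attentive mean and std, concatenated.
if __name__ == "__main__":
    pooling = ChnAttnStatPooling(input_size=1536)
    frames = torch.randn(4, 1536, 100)   # (batch, feats, frames)
    utterance = pooling(frames)
    assert utterance.shape == (4, 2 * 1536)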
|
a21fbb8c5880b9e753be4612032c4684ff4423ac
|
dea96d9c6f510d759bb00cb214c4fcd1ac7b32cd
|
/数据结构/NowCode/2_JumpFloor.py
|
169ef182db4f1837c8a7b339f045c8c55f3024d4
|
[
"MIT"
] |
permissive
|
moxi624/LearningNotes
|
cf92103f6a2592b9017f27bdfac57459ef8e1f6a
|
bb62ae291955944d4d73acaaf4531786314214ac
|
refs/heads/master
| 2023-03-08T03:59:32.137474
| 2022-10-10T15:21:48
| 2022-10-10T15:21:59
| 229,531,057
| 618
| 156
|
MIT
| 2023-03-07T02:14:59
| 2019-12-22T07:03:57
|
Java
|
UTF-8
|
Python
| false
| false
| 561
|
py
|
2_JumpFloor.py
|
# A frog can jump up either 1 or 2 steps at a time. How many distinct ways can
# it jump up an n-step staircase? (Different orderings count as different
# results.) This is the Fibonacci recurrence: ways(n) = ways(n-1) + ways(n-2).
class Solution:
def JumpFloor(self, n):
        if n == 0:
            return 0
        if n == 1:
            return 1
        if n == 2:
            return 2
ret = 0
a = 1
b = 2
for i in range(3, n + 1):
ret = a + b
a = b
b = ret
return ret
if __name__ == '__main__':
print(Solution().JumpFloor(10))
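# Quick sanity check against the naive recursion (illustrative only).
def jump_floor_recursive(n):
    if n <= 2:
        return n
    return jump_floor_recursive(n - 1) + jump_floor_recursive(n - 2)

assert Solution().JumpFloor(10) == jump_floor_recursive(10) == 89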
|
b2ac827e9a783c1a41466d01cdc5418d0a63d0f0
|
d05ff6dda43729011b7d469b0a2bc02ed66b6342
|
/frappe/website/web_form/request_data/request_data.py
|
1224e3f095eb3f0f2795872ab8b51816679808a3
|
[
"MIT"
] |
permissive
|
frappe/frappe
|
520c14bed3810c3360629a81dcc33f0ebe21ac4d
|
dd8f314bf4a8a4739eebbfac741abc533ac58bc1
|
refs/heads/develop
| 2023-08-30T19:29:10.406706
| 2023-08-30T11:20:40
| 2023-08-30T11:20:40
| 1,864,194
| 5,955
| 3,735
|
MIT
| 2023-09-14T16:08:04
| 2011-06-08T08:14:16
|
Python
|
UTF-8
|
Python
| false
| false
| 32
|
py
|
request_data.py
|
def get_context(context):
pass
|
0c6f8204736f0d1d2cba25561b7e41bb9b282d47
|
7ff6521386936a2afac803d03e1a285cdb873d8f
|
/supersuit/vector/multiproc_vec.py
|
9901fadaac121e944cd39fa8c02e97c1d2a23111
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Farama-Foundation/SuperSuit
|
4671856a4102b528a9b4e4c705fa0abfff99be18
|
dc4dc8aa34c2623fc0d0abdb3e53b9f67f468253
|
refs/heads/master
| 2023-08-03T12:54:52.623837
| 2023-07-21T00:43:26
| 2023-07-21T00:43:26
| 252,030,937
| 156
| 32
|
NOASSERTION
| 2023-07-20T15:59:02
| 2020-04-01T00:27:34
|
Python
|
UTF-8
|
Python
| false
| false
| 8,671
|
py
|
multiproc_vec.py
|
import copy
import multiprocessing as mp
import time
import traceback
import gymnasium.vector
import numpy as np
from gymnasium.vector.utils import (
concatenate,
create_empty_array,
create_shared_memory,
iterate,
read_from_shared_memory,
write_to_shared_memory,
)
from .utils.shared_array import SharedArray
def compress_info(infos):
non_empty_infs = [(i, info) for i, info in enumerate(infos) if info]
return non_empty_infs
def decompress_info(num_envs, idx_starts, comp_infos):
all_info = [{}] * num_envs
for idx_start, comp_infos in zip(idx_starts, comp_infos):
for i, info in comp_infos:
all_info[idx_start + i] = info
return all_info
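# Round-trip sketch (illustrative): only non-empty info dicts cross the pipe,
# and decompress_info() re-expands them to one dict per sub-environment.
assert compress_info([{}, {"a": 1}]) == [(1, {"a": 1})]
assert decompress_info(4, [0, 2], [[(1, {"a": 1})], []]) == [{}, {"a": 1}, {}, {}]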
def write_observations(vec_env, env_start_idx, shared_obs, obs):
obs = list(iterate(vec_env.observation_space, obs))
for i in range(vec_env.num_envs):
write_to_shared_memory(
vec_env.observation_space,
env_start_idx + i,
obs[i],
shared_obs,
)
def numpy_deepcopy(buf):
if isinstance(buf, dict):
return {name: numpy_deepcopy(v) for name, v in buf.items()}
elif isinstance(buf, tuple):
return tuple(numpy_deepcopy(v) for v in buf)
elif isinstance(buf, np.ndarray):
return buf.copy()
    else:
        raise ValueError("numpy_deepcopy only supports dict, tuple, and np.ndarray, got %s" % type(buf))
def async_loop(
vec_env_constr, inpt_p, pipe, shared_obs, shared_rews, shared_terms, shared_truncs
):
inpt_p.close()
try:
vec_env = vec_env_constr()
pipe.send(vec_env.num_envs)
env_start_idx = pipe.recv()
env_end_idx = env_start_idx + vec_env.num_envs
while True:
instr = pipe.recv()
comp_infos = []
if instr == "close":
vec_env.close()
elif isinstance(instr, tuple):
name, data = instr
if name == "reset":
observations, infos = vec_env.reset(seed=data[0], options=data[1])
comp_infos = compress_info(infos)
write_observations(vec_env, env_start_idx, shared_obs, observations)
shared_terms.np_arr[env_start_idx:env_end_idx] = False
shared_truncs.np_arr[env_start_idx:env_end_idx] = False
shared_rews.np_arr[env_start_idx:env_end_idx] = 0.0
elif name == "step":
actions = data
actions = concatenate(
vec_env.action_space,
actions,
create_empty_array(vec_env.action_space, n=len(actions)),
)
observations, rewards, terms, truncs, infos = vec_env.step(actions)
write_observations(vec_env, env_start_idx, shared_obs, observations)
shared_terms.np_arr[env_start_idx:env_end_idx] = terms
shared_truncs.np_arr[env_start_idx:env_end_idx] = truncs
shared_rews.np_arr[env_start_idx:env_end_idx] = rewards
comp_infos = compress_info(infos)
elif name == "env_is_wrapped":
comp_infos = vec_env.env_is_wrapped(data)
elif name == "render":
render_result = vec_env.render(data)
if data == "rgb_array":
comp_infos = render_result
else:
raise AssertionError("bad tuple instruction name: " + name)
elif instr == "terminate":
return
else:
raise AssertionError("bad instruction: " + instr)
pipe.send(comp_infos)
except BaseException as e:
tb = traceback.format_exc()
pipe.send((e, tb))
class ProcConcatVec(gymnasium.vector.VectorEnv):
def __init__(
self, vec_env_constrs, observation_space, action_space, tot_num_envs, metadata
):
self.observation_space = observation_space
self.action_space = action_space
self.num_envs = num_envs = tot_num_envs
self.metadata = metadata
self.shared_obs = create_shared_memory(self.observation_space, n=self.num_envs)
self.shared_act = create_shared_memory(self.action_space, n=self.num_envs)
self.shared_rews = SharedArray((num_envs,), dtype=np.float32)
self.shared_terms = SharedArray((num_envs,), dtype=np.uint8)
self.shared_truncs = SharedArray((num_envs,), dtype=np.uint8)
self.observations_buffers = read_from_shared_memory(
self.observation_space, self.shared_obs, n=self.num_envs
)
self.graceful_shutdown_timeout = 10
pipes = []
procs = []
for constr in vec_env_constrs:
inpt, outpt = mp.Pipe()
constr = gymnasium.vector.async_vector_env.CloudpickleWrapper(constr)
proc = mp.Process(
target=async_loop,
args=(
constr,
inpt,
outpt,
self.shared_obs,
self.shared_rews,
self.shared_terms,
self.shared_truncs,
),
)
proc.start()
outpt.close()
pipes.append(inpt)
procs.append(proc)
self.pipes = pipes
self.procs = procs
num_envs = 0
env_nums = self._receive_info()
idx_starts = []
for pipe, cnum_env in zip(self.pipes, env_nums):
cur_env_idx = num_envs
num_envs += cnum_env
pipe.send(cur_env_idx)
idx_starts.append(cur_env_idx)
idx_starts.append(num_envs)
assert num_envs == tot_num_envs
self.idx_starts = idx_starts
def reset(self, seed=None, options=None):
for i, pipe in enumerate(self.pipes):
if seed is not None:
pipe.send(("reset", (seed + i, options)))
else:
pipe.send(("reset", (seed, options)))
info = self._receive_info()
return numpy_deepcopy(self.observations_buffers), copy.deepcopy(info)
def step_async(self, actions):
actions = list(iterate(self.action_space, actions))
for i, pipe in enumerate(self.pipes):
start, end = self.idx_starts[i : i + 2]
pipe.send(("step", actions[start:end]))
def _receive_info(self):
all_data = []
for cin in self.pipes:
data = cin.recv()
if isinstance(data, tuple):
e, tb = data
print(tb)
raise e
all_data.append(data)
return all_data
def step_wait(self):
compressed_infos = self._receive_info()
infos = decompress_info(self.num_envs, self.idx_starts, compressed_infos)
rewards = self.shared_rews.np_arr
terms = self.shared_terms.np_arr
truncs = self.shared_truncs.np_arr
return (
numpy_deepcopy(self.observations_buffers),
rewards.copy(),
terms.astype(bool).copy(),
truncs.astype(bool).copy(),
copy.deepcopy(infos),
)
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def __del__(self):
self.close()
    def render(self, mode="rgb_array"):
        # async_loop expects a ("render", mode) tuple; a bare "render" string
        # would hit the "bad instruction" branch in the worker.
        self.pipes[0].send(("render", mode))
render_result = self.pipes[0].recv()
if isinstance(render_result, tuple):
e, tb = render_result
print(tb)
raise e
return render_result
def close(self):
try:
for pipe, proc in zip(self.pipes, self.procs):
if proc.is_alive():
pipe.send("close")
except OSError:
pass
else:
deadline = (
None
if self.graceful_shutdown_timeout is None
else time.monotonic() + self.graceful_shutdown_timeout
)
for proc in self.procs:
timeout = None if deadline is None else deadline - time.monotonic()
if timeout is not None and timeout <= 0:
break
proc.join(timeout)
for pipe, proc in zip(self.pipes, self.procs):
if proc.is_alive():
proc.kill()
pipe.close()
def env_is_wrapped(self, wrapper_class, indices=None):
for i, pipe in enumerate(self.pipes):
pipe.send(("env_is_wrapped", wrapper_class))
results = self._receive_info()
return sum(results, [])
|
c00a29aecbb835e2f393ef735c7577a3f87b7373
|
dbb120cceaed09027f250bedbb6f5a8c5d4c71f5
|
/netket/nn/fast_masked_linear.py
|
a8a2b3b90619353a64ad91c242cb71e8e34abd6f
|
[
"Apache-2.0"
] |
permissive
|
netket/netket
|
b0ec4dc6e0ed5493299a38b8dbfd06e9f946e3b3
|
f4f2844739302fd7e044b722eae8a93d0bfc59ec
|
refs/heads/master
| 2023-08-29T12:03:29.446789
| 2023-08-20T10:21:41
| 2023-08-20T10:21:41
| 130,741,783
| 467
| 181
|
Apache-2.0
| 2023-09-14T20:40:47
| 2018-04-23T18:48:08
|
Python
|
UTF-8
|
Python
| false
| false
| 16,205
|
py
|
fast_masked_linear.py
|
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from flax import linen as nn
from flax.linen.dtypes import promote_dtype
from jax import lax
from jax import numpy as jnp
from jax.nn.initializers import zeros
from netket.nn.masked_linear import (
MaskedConv1D,
MaskedConv2D,
MaskedDense1D,
_conv_dimension_numbers,
default_kernel_init,
wrap_kernel_init,
)
from netket.utils.types import Array, DType, NNInitFunc
from netket.utils import deprecate_dtype
@deprecate_dtype
class FastMaskedDense1D(nn.Module):
"""
1D linear transformation module with mask for fast autoregressive NN.
See :class:`netket.nn.FastMaskedConv1D` for a brief explanation of fast autoregressive sampling.
TODO: FastMaskedDense1D does not support JIT yet, because it involves slicing the cached inputs
and the weights with a dynamic shape.
"""
size: int
"""number of sites."""
features: int
"""output feature density, should be the last dimension."""
exclusive: bool
"""True if an output element does not depend on the input element at the same index."""
use_bias: bool = True
"""whether to add a bias to the output (default: True)."""
param_dtype: DType = jnp.float64
"""the dtype of the computation (default: float64)."""
precision: Any = None
"""numerical precision of the computation, see :class:`jax.lax.Precision` for details."""
kernel_init: NNInitFunc = default_kernel_init
"""initializer for the weight matrix."""
bias_init: NNInitFunc = zeros
"""initializer for the bias."""
@nn.compact
def update_site(self, inputs: Array, index: int) -> Array:
"""
Adds an input site into the cache, and applies the masked linear transformation to the cache.
Args:
inputs: an input site to be added into the cache with dimensions (batch, features).
index: the index of the output site. The index of the input site should be `index - self.exclusive`.
Returns:
The output site with dimensions (batch, features).
"""
if inputs.ndim == 1:
is_single_input = True
inputs = jnp.expand_dims(inputs, axis=0)
else:
is_single_input = False
batch, in_features = inputs.shape
size = self.size
if self.use_bias:
bias = self.param(
"bias", self.bias_init, (size, self.features), self.param_dtype
)
else:
bias = None
# The construction of `mask` will be optimized to a constant by JIT
mask = jnp.ones((size, size), dtype=self.param_dtype)
mask = jnp.triu(mask, self.exclusive)
mask = jnp.kron(
mask, jnp.ones((in_features, self.features), dtype=self.param_dtype)
)
kernel = self.param(
"kernel",
wrap_kernel_init(self.kernel_init, mask),
(size * in_features, size * self.features),
self.param_dtype,
)
inputs, kernel, mask, bias = promote_dtype(
inputs, kernel, mask, bias, dtype=None
)
# Number of input sites depended by the output site at the index
size_i = index + 1
# Initialize the cache with zeros, and the RNG key is None
# `cache.dtype` must be the same as `inputs.dtype` (no promotion)
_cache = self.variable(
"cache", "inputs", zeros, None, (batch, size, in_features), inputs.dtype
)
initializing = self.is_mutable_collection("params")
if not initializing:
# Add the input site into the cache
# To write the cache, use `_cache.value` as the left value of the assignment
_cache.value = jnp.where(
index - self.exclusive >= 0,
_cache.value.at[:, index - self.exclusive, :].set(inputs),
_cache.value,
)
cache = _cache.value
cache_i = cache[:, :size_i, :]
cache_i = cache_i.reshape((batch, size_i * in_features))
mask_i = mask.reshape((size, in_features, size, self.features))
mask_i = mask_i[:size_i, :, index, :]
mask_i = mask_i.reshape((size_i * in_features, self.features))
kernel_i = kernel.reshape((size, in_features, size, self.features))
kernel_i = kernel_i[:size_i, :, index, :]
kernel_i = kernel_i.reshape((size_i * in_features, self.features))
y_i = lax.dot(cache_i, mask_i * kernel_i, precision=self.precision)
if self.use_bias:
y_i = y_i + bias[index, :]
assert y_i.shape[1] == self.features
if is_single_input:
y_i = y_i.squeeze(axis=0)
return y_i
def __call__(self, inputs: Array) -> Array:
"""
Applies the masked linear transformation to all input sites.
Args:
inputs: input data with dimensions (batch, size, features).
Returns:
The transformed data.
"""
return MaskedDense1D.__call__(self, inputs)
@deprecate_dtype
class FastMaskedConv1D(nn.Module):
"""
1D convolution module with mask for fast autoregressive NN.
    The fast autoregressive sampling is described in `Ramachandran et al. <https://arxiv.org/abs/1704.06001>`_.
To generate one sample using an autoregressive network, we need to evaluate the network `N` times, where `N` is
the number of input sites. But we only change one input site each time, so we can cache unchanged intermediate results
and avoid repeated computation.
"""
features: int
"""number of convolution filters."""
kernel_size: int
"""length of the convolutional kernel."""
kernel_dilation: int
"""dilation factor of the convolution kernel."""
exclusive: bool
"""True if an output element does not depend on the input element at the same index."""
feature_group_count: int = 1
"""if specified, divides the input features into groups (default: 1)."""
use_bias: bool = True
"""whether to add a bias to the output (default: True)."""
param_dtype: DType = jnp.float64
"""the dtype of the computation (default: float64)."""
precision: Any = None
"""numerical precision of the computation, see :class:`jax.lax.Precision` for details."""
kernel_init: NNInitFunc = default_kernel_init
"""initializer for the convolutional kernel."""
bias_init: NNInitFunc = zeros
"""initializer for the bias."""
@nn.compact
def update_site(self, inputs: Array, index: int) -> Array:
"""
Adds an input site into the cache, and applies the masked convolution to the cache.
Args:
inputs: an input site to be added into the cache with dimensions (batch, features).
index: the index of the output site. The index of the input site should be `index - self.exclusive`.
Returns:
The next output site with dimensions (batch, features).
"""
kernel_size = self.kernel_size - self.exclusive
dilation = self.kernel_dilation
if inputs.ndim == 1:
is_single_input = True
inputs = jnp.expand_dims(inputs, axis=0)
else:
is_single_input = False
batch, in_features = inputs.shape
assert in_features % self.feature_group_count == 0
cache_size = kernel_size * dilation - (not self.exclusive) * (dilation - 1)
kernel_shape = (
kernel_size,
in_features // self.feature_group_count,
self.features,
)
kernel = self.param("kernel", self.kernel_init, kernel_shape, self.param_dtype)
if self.use_bias:
bias = self.param(
"bias", self.bias_init, (self.features,), self.param_dtype
)
else:
bias = None
inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=None)
# Initialize the cache with zeros, and the RNG key is None
# `cache.dtype` must be the same as `inputs.dtype` (no promotion)
_cache = self.variable(
"cache",
"inputs",
zeros,
None,
(batch, cache_size, in_features),
inputs.dtype,
)
initializing = self.is_mutable_collection("params")
if not initializing:
# Add the input site into the cache
# To write the cache, use `_cache.value` as the left value of the assignment
_cache.value = jnp.where(
index - self.exclusive >= 0,
jnp.concatenate(
[_cache.value[:, 1:, :], jnp.expand_dims(inputs, axis=1)], axis=1
),
_cache.value,
)
cache = _cache.value
if self.exclusive and dilation > 1:
cache = cache[:, : -(dilation - 1), :]
dimension_numbers = _conv_dimension_numbers(cache.shape)
y_i = lax.conv_general_dilated(
cache,
kernel,
window_strides=(1,),
padding="VALID",
lhs_dilation=(1,),
rhs_dilation=(dilation,),
dimension_numbers=dimension_numbers,
feature_group_count=self.feature_group_count,
precision=self.precision,
)
if self.use_bias:
y_i = y_i + bias
y_i = y_i.squeeze(axis=1)
if is_single_input:
y_i = y_i.squeeze(axis=0)
return y_i
def __call__(self, inputs: Array) -> Array:
"""
Applies the masked convolution to all input sites.
Args:
inputs: input data with dimensions (batch, size, features).
Returns:
The convolved data.
"""
return MaskedConv1D.__call__(self, inputs)
@deprecate_dtype
class FastMaskedConv2D(nn.Module):
"""
2D convolution module with mask for fast autoregressive NN.
See :class:`netket.nn.FastMaskedConv1D` for a brief explanation of fast autoregressive sampling.
"""
L: int
"""edge length of the 2D lattice."""
features: int
"""number of convolution filters."""
kernel_size: tuple[int, int]
"""shape of the convolutional kernel `(h, w)`. Typically, :math:`h = w // 2 + 1`."""
kernel_dilation: tuple[int, int]
"""a sequence of 2 integers, giving the dilation factor to
apply in each spatial dimension of the convolution kernel."""
exclusive: bool
"""True if an output element does not depend on the input element at the same index."""
feature_group_count: int = 1
"""if specified, divides the input features into groups (default: 1)."""
use_bias: bool = True
"""whether to add a bias to the output (default: True)."""
param_dtype: DType = jnp.float64
"""the dtype of the computation (default: float64)."""
precision: Any = None
"""numerical precision of the computation, see :class:`jax.lax.Precision` for details."""
kernel_init: NNInitFunc = default_kernel_init
"""initializer for the convolutional kernel."""
bias_init: NNInitFunc = zeros
"""initializer for the bias."""
def setup(self):
MaskedConv2D.setup(self)
@nn.compact
def update_site(self, inputs: Array, index: int) -> Array:
"""
Adds an input site into the cache, and applies the masked convolution to the cache.
Args:
inputs: an input site to be added into the cache with dimensions (batch, features).
index: the index of the output site. The index of the input site should be `index - self.exclusive`.
Returns:
The next output site with dimensions (batch, features).
"""
L = self.L
index_w = index % L
kernel_h, kernel_w = self.kernel_size
dilation_h, dilation_w = self.kernel_dilation
ones = (1, 1)
if inputs.ndim == 1:
is_single_input = True
inputs = jnp.expand_dims(inputs, axis=0)
else:
is_single_input = False
batch, in_features = inputs.shape
assert in_features % self.feature_group_count == 0
recep_h = (kernel_h - 1) * dilation_h + 1
recep_w = (kernel_w - 1) * dilation_w + 1
if self.use_bias:
bias = self.param(
"bias", self.bias_init, (self.features,), self.param_dtype
)
else:
bias = None
kernel_shape = self.kernel_size + (
in_features // self.feature_group_count,
self.features,
)
kernel = self.param(
"kernel",
wrap_kernel_init(self.kernel_init, self.mask),
kernel_shape,
self.param_dtype,
)
inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=None)
# Initialize the cache with zeros, and the RNG key is None
# `cache.dtype` must be the same as `inputs.dtype` (no promotion)
_cache = self.variable(
"cache",
"inputs",
zeros,
None,
(batch, recep_h, L, in_features),
inputs.dtype,
)
initializing = self.is_mutable_collection("params")
if not initializing:
# Add the input site into the cache
# To write the cache, use `_cache.value` as the left value of the assignment
inputs = jnp.expand_dims(inputs, axis=(1, 2))
# Index of the input site in the width direction
index_w_in = (index - self.exclusive) % L
def _add(cache):
# return cache.at[:, -1, index_w_in, :].set(inputs)
return lax.dynamic_update_slice(cache, inputs, (0, -1, index_w_in, 0))
def _shift(cache):
return jnp.pad(cache[:, 1:, :, :], ((0, 0), (0, 1), (0, 0), (0, 0)))
cache_new_row = jnp.where(
index_w_in == 0, _add(_shift(_cache.value)), _shift(_add(_cache.value))
)
cache_new = jnp.where(index_w == 0, cache_new_row, _add(_cache.value))
_cache.value = jnp.where(
index - self.exclusive >= 0, cache_new, _cache.value
)
cache = _cache.value
# Zero padding
cache = jnp.pad(
cache,
(
(0, 0),
(0, 0),
(kernel_w // 2 * dilation_w, (kernel_w - 1) // 2 * dilation_w),
(0, 0),
),
)
# cache = cache[:, :, index_w : index_w + recep_w, :]
cache = lax.dynamic_slice(
cache, (0, 0, index_w, 0), (batch, recep_h, recep_w, in_features)
)
dimension_numbers = _conv_dimension_numbers(cache.shape)
y_i = lax.conv_general_dilated(
cache,
kernel,
window_strides=ones,
padding="VALID",
lhs_dilation=ones,
rhs_dilation=self.kernel_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=self.feature_group_count,
precision=self.precision,
)
if self.use_bias:
y_i = y_i + bias
y_i = y_i.squeeze(axis=(1, 2))
if is_single_input:
y_i = y_i.squeeze(axis=0)
return y_i
def __call__(self, inputs: Array) -> Array:
"""
Applies the masked convolution to all input sites.
Args:
inputs: input data with dimensions (batch, width, height, features).
Returns:
The convolved data.
"""
return MaskedConv2D.__call__(self, inputs)
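# Toy sketch of the fast autoregressive idea (plain numpy, not NetKet API):
# with an upper-triangular mask, output site i depends only on input sites
# j <= i, so a growing cache of past inputs reproduces the full masked matmul
# one site at a time -- the trick used by update_site() above.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    size = 5
    kernel = rng.normal(size=(size, size))
    mask = np.triu(np.ones((size, size)))  # mask[j, i] = 1 iff j <= i
    x = rng.normal(size=size)
    full = (mask * kernel).T @ x           # all output sites at once
    cache = np.zeros(size)
    fast = np.empty(size)
    for i in range(size):
        cache[i] = x[i]                    # add one input site to the cache
        fast[i] = cache[: i + 1] @ kernel[: i + 1, i]
    assert np.allclose(full, fast)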
|
4880a324a1b9a897a7ea11bdf941d288c0be511a
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Extensions/Dependencies/tools/gfortran
|
7572943a989932b3558ca339dde23acbfeb273ab
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832
| 2022-07-20T14:38:45
| 2022-07-20T14:38:45
| 148,944,721
| 884
| 157
|
MIT
| 2023-02-26T21:34:04
| 2018-09-15T22:29:07
|
C
|
UTF-8
|
Python
| false
| false
| 4,183
|
gfortran
|
#!/usr/bin/env python3
import sys
import os
import shlex
import subprocess
import string
import random
import shutil
def join_args(args):
return ' '.join(shlex.quote(x) for x in args)
if "-dumpversion" in sys.argv:
print("10.2.0")
sys.exit(0)
arguments = []
sys_args = sys.argv
del sys_args[0]
try:
for arg in os.environ["ARCHFLAGS"].split(" "):
sys_args.append(arg)
except KeyError:
pass
if "-h" in sys_args or "--help" in sys_args:
print("The running executable emulates a Fortran compiler. If '-arch arm64' is passed, the compiler will produce an arm64 object file for iOS from the passed source. If not, gfortran located in '/usr/local/bin/' will be executed.")
print("Because flang runs in a Docker container, only files under '/Users/', '/var/folders' or '/Library' can be compiled.")
sys.exit(0)
all_args_are_object_files = True
for arg in sys_args:
if os.path.isfile(arg) and not arg.endswith(".o"):
all_args_are_object_files = False
if (not "-c" and not all_args_are_object_files) in sys_args or not "-arch arm64" in " ".join(sys_args):
print("The executed Fortran compiler only supports producing object files for iOS arm64, falling back to gfortran.", file=sys.stderr)
print("To compile sources for iOS arm64, make sure to add -arch arm64 and -c.", file=sys.stderr)
sys.exit(os.system(shlex.join(["/usr/local/bin/gfortran"]+sys_args)))
if "-bundle" in sys_args or all_args_are_object_files:
args = ["clang", "-undefined", "dynamic_lookup", "-shared"]
try:
for arg in os.environ["LDFLAGS"].split(" "):
args.append(arg)
except KeyError:
pass
for arg in sys_args:
if arg != "-bundle":
args.append(arg)
command = shlex.join(args)
sys.exit(os.system(command))
def convert_to_docker_path(arg):
if arg.startswith("/"):
arg = "/¡"+arg
else:
arg = os.path.join(os.getcwd(), arg)
return arg
the_previous_parameter_was_dash_o = False
the_previous_parameter_was_dash_c = False
the_previous_parameter_was_dash_arch = False
output_path = None
source = None
for arg in sys_args:
if arg == "-c":
the_previous_parameter_was_dash_c = True
elif the_previous_parameter_was_dash_c:
the_previous_parameter_was_dash_c = False
source = arg
if arg == "-o":
the_previous_parameter_was_dash_o = True
continue
elif the_previous_parameter_was_dash_o:
the_previous_parameter_was_dash_o = False
output_path = arg
continue
if arg == "-arch":
the_previous_parameter_was_dash_arch = True
continue
elif the_previous_parameter_was_dash_arch:
the_previous_parameter_was_dash_arch = False
continue
if arg == "-fallow-argument-mismatch":
continue
if os.path.exists(arg):
arg = convert_to_docker_path(arg)
if arg.startswith("-I"):
path = arg.split("-I")[-1]
arg = "-I"+convert_to_docker_path(path)
if arg.startswith("/¡"):
arg = arg[2:]
arguments.append(arg)
if output_path is None and source is not None:
parts = source.split(".")
del parts[-1]
output_path = os.getcwd()+"/"+".".join(parts)+".o"
if os.path.isfile(output_path):
sys.exit(0)
print(output_path)
dir = os.path.dirname(os.path.abspath(__file__))
cwd = os.getcwd()
arguments.insert(0, os.path.abspath(os.path.join(dir, "flang.sh")))
arguments.insert(1, "--save-temps")
flang_command = join_args(arguments)
inbox = os.path.join(cwd, ".inbox"+''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)))
try:
os.mkdir(inbox)
except FileExistsError:
pass
os.chdir(inbox)
os.system(flang_command)
file_path = None
for file in os.listdir("."):
if not file.endswith(".ll"):
try:
os.remove(file)
except FileNotFoundError:
pass
else:
file_path = os.path.join(os.getcwd(), file)
os.chdir(cwd)
llc = [os.path.join(dir, "llc"), "-mtriple=arm64-apple-ios", "-filetype=obj", file_path, "-o", output_path]
subprocess.run(llc, stdout=None, stderr=None)
shutil.rmtree(inbox)
|
|
5b3fd235aa5fee6d8d90209c226fdf6f45968453
|
83963c19fd120dcc7498b726cc56de7fbb900a47
|
/osxphotos/phototables.py
|
d4f09dfa0f391299d4ebb3b47974e879209473ae
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
RhetTbull/osxphotos
|
55ad4f1257bcd26bb3fbadde6ce5dd59c0917354
|
2cb5a4d18a27be6ccf68f5f35abd39418d238016
|
refs/heads/main
| 2023-09-02T18:11:06.227191
| 2023-09-02T16:06:51
| 2023-09-02T16:06:51
| 192,160,985
| 1,287
| 93
|
MIT
| 2023-09-14T14:10:58
| 2019-06-16T07:07:49
|
Python
|
UTF-8
|
Python
| false
| false
| 8,828
|
py
|
phototables.py
|
"""Provide direct access to the database tables associated with a photo."""
from __future__ import annotations
import sqlite3
from typing import Any
import osxphotos
from ._constants import _DB_TABLE_NAMES
def get_table_columns(conn: sqlite3.Connection, table_name: str) -> list[str]:
cursor = conn.cursor()
cursor.execute(f"PRAGMA table_info({table_name})")
return [col[1] for col in cursor.fetchall()]
class PhotoTables:
def __init__(self, photo: osxphotos.PhotoInfo):
"""Create a PhotoTables object.
Args:
db: PhotosDB object
uuid: The UUID of the photo.
"""
self.db = photo._db
self.photo = photo
self.uuid = photo.uuid
self.version = self.db._photos_ver
@property
def ZASSET(self) -> Table:
"""Return the ZASSET table."""
return AssetTable(self.db, self.version, self.uuid)
@property
def ZADDITIONALASSETATTRIBUTES(self) -> Table:
"""Return the ZADDITIONALASSETATTRIBUTES table."""
return AdditionalAttributesTable(self.db, self.version, self.uuid)
@property
def ZDETECTEDFACE(self) -> Table:
"""Return the ZDETECTEDFACE table."""
return DetectedFaceTable(self.db, self.version, self.uuid)
@property
def ZPERSON(self) -> Table:
"""Return the ZPERSON table."""
return PersonTable(self.db, self.version, self.uuid)
class Table:
def __init__(self, db: osxphotos.PhotosDB, version: int, uuid: str):
"""Create a Table object.
Args:
db: PhotosDB object
table_name: The name of the table.
"""
self.db = db
self.conn, _ = self.db.get_db_connection()
self.version = version
self.uuid = uuid
self.asset_table = _DB_TABLE_NAMES[self.version]["ASSET"]
self.columns = [] # must be set in subclass
self.table_name = "" # must be set in subclass
def rows(self) -> list[tuple[Any]]:
"""Return rows for this photo from the table."""
# this should be implemented in the subclass
raise NotImplementedError
def rows_dict(self) -> list[dict[str, Any]]:
"""Return rows for this photo from the table as a list of dicts."""
rows = self.rows()
return [dict(zip(self.columns, row)) for row in rows] if rows else []
def _get_column(self, column: str):
"""Get column value for this photo from the table."""
# this should be implemented in the subclass
raise NotImplementedError
def __getattr__(self, name):
"""Get column value for this photo from the table."""
if name not in self.__dict__ and name in self.columns:
return self._get_column(name)
else:
raise AttributeError(f"Table {self.table_name} has no column {name}")
class AssetTable(Table):
"""ZASSET table."""
def __init__(self, db: osxphotos.PhotosDB, version: int, uuid: str):
"""Create a Table object."""
super().__init__(db, version, uuid)
self.columns = get_table_columns(self.conn, self.asset_table)
self.table_name = self.asset_table
def rows(self) -> list[Any]:
"""Return row2 for this photo from the ZASSET table."""
conn, cursor = self.db.get_db_connection()
cursor.execute(
f"SELECT * FROM {self.asset_table} WHERE ZUUID = ?", (self.uuid,)
)
return result if (result := cursor.fetchall()) else []
def _get_column(self, column: str) -> tuple[Any]:
"""Get column value for this photo from the ZASSET table."""
conn, cursor = self.db.get_db_connection()
cursor.execute(
f"SELECT {column} FROM {self.asset_table} WHERE ZUUID = ?",
(self.uuid,),
)
return (
tuple(result[0] for result in results)
if (results := cursor.fetchall())
else ()
)
class AdditionalAttributesTable(Table):
"""ZADDITIONALASSETATTRIBUTES table."""
def __init__(self, db: osxphotos.PhotosDB, version: int, uuid: str):
"""Create a Table object."""
super().__init__(db, version, uuid)
self.columns = get_table_columns(self.conn, "ZADDITIONALASSETATTRIBUTES")
self.table_name = "ZADDITIONALASSETATTRIBUTES"
def rows(self) -> list[tuple[Any]]:
"""Return rows for this photo from the ZADDITIONALASSETATTRIBUTES table."""
conn, cursor = self.db.get_db_connection()
sql = f""" SELECT ZADDITIONALASSETATTRIBUTES.*
FROM ZADDITIONALASSETATTRIBUTES
JOIN {self.asset_table} ON {self.asset_table}.Z_PK = ZADDITIONALASSETATTRIBUTES.ZASSET
WHERE {self.asset_table}.ZUUID = ?;
"""
cursor.execute(sql, (self.uuid,))
return result if (result := cursor.fetchall()) else []
def _get_column(self, column: str) -> tuple[Any]:
"""Get column value for this photo from the ZADDITIONALASSETATTRIBUTES table."""
conn, cursor = self.db.get_db_connection()
sql = f""" SELECT ZADDITIONALASSETATTRIBUTES.{column}
FROM ZADDITIONALASSETATTRIBUTES
JOIN {self.asset_table} ON {self.asset_table}.Z_PK = ZADDITIONALASSETATTRIBUTES.ZASSET
WHERE {self.asset_table}.ZUUID = ?;
"""
cursor.execute(sql, (self.uuid,))
return (
tuple(result[0] for result in results)
if (results := cursor.fetchall())
else ()
)
class DetectedFaceTable(Table):
"""ZDETECTEDFACE table."""
def __init__(self, db: osxphotos.PhotosDB, version: int, uuid: str):
"""Create a Table object."""
super().__init__(db, version, uuid)
self.columns = get_table_columns(self.conn, "ZDETECTEDFACE")
self.table_name = "ZDETECTEDFACE"
def rows(self) -> list[tuple[Any]]:
"""Return rows for this photo from the ZDETECTEDFACE table."""
conn, cursor = self.db.get_db_connection()
sql = f""" SELECT ZDETECTEDFACE.*
FROM ZDETECTEDFACE
JOIN {self.asset_table} ON {self.asset_table}.Z_PK = ZDETECTEDFACE.ZASSET
WHERE {self.asset_table}.ZUUID = ?;
"""
cursor.execute(sql, (self.uuid,))
return result if (result := cursor.fetchall()) else []
def _get_column(self, column: str) -> tuple[Any]:
"""Get column value for this photo from the ZDETECTEDFACE table."""
conn, cursor = self.db.get_db_connection()
sql = f""" SELECT ZDETECTEDFACE.{column}
FROM ZDETECTEDFACE
JOIN {self.asset_table} ON {self.asset_table}.Z_PK = ZDETECTEDFACE.ZASSET
WHERE {self.asset_table}.ZUUID = ?;
"""
cursor.execute(sql, (self.uuid,))
return (
tuple(result[0] for result in results)
if (results := cursor.fetchall())
else ()
)
class PersonTable(Table):
"""ZPERSON table."""
def __init__(self, db: osxphotos.PhotosDB, version: int, uuid: str):
"""Create a Table object."""
super().__init__(db, version, uuid)
self.columns = get_table_columns(self.conn, "ZPERSON")
self.table_name = "ZPERSON"
def rows(self) -> list[tuple[Any]]:
"""Return rows for this photo from the ZPERSON table."""
conn, cursor = self.db.get_db_connection()
person_fk = _DB_TABLE_NAMES[self.version]["DETECTED_FACE_PERSON_FK"]
asset_fk = _DB_TABLE_NAMES[self.version]["DETECTED_FACE_ASSET_FK"]
sql = f""" SELECT ZPERSON.*
FROM ZPERSON
JOIN ZDETECTEDFACE ON {person_fk} = ZPERSON.Z_PK
JOIN ZASSET ON ZASSET.Z_PK = {asset_fk}
WHERE {self.asset_table}.ZUUID = ?;
"""
cursor.execute(sql, (self.uuid,))
return result if (result := cursor.fetchall()) else []
def _get_column(self, column: str) -> tuple[Any]:
"""Get column value for this photo from the ZPERSON table."""
conn, cursor = self.db.get_db_connection()
person_fk = _DB_TABLE_NAMES[self.version]["DETECTED_FACE_PERSON_FK"]
asset_fk = _DB_TABLE_NAMES[self.version]["DETECTED_FACE_ASSET_FK"]
sql = f""" SELECT ZPERSON.{column}
FROM ZPERSON
JOIN ZDETECTEDFACE ON {person_fk} = ZPERSON.Z_PK
JOIN ZASSET ON ZASSET.Z_PK = {asset_fk}
WHERE {self.asset_table}.ZUUID = ?;
"""
cursor.execute(sql, (self.uuid,))
return (
tuple(result[0] for result in results)
if (results := cursor.fetchall())
else ()
)
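# Usage sketch (assumes osxphotos is installed and a local Photos library
# exists; the attribute names come from the tables defined above):
#
#   photosdb = osxphotos.PhotosDB()
#   tables = PhotoTables(photosdb.photos()[0])
#   tables.ZASSET.rows_dict()   # raw ZASSET rows for the photo, as dicts
#   tables.ZASSET.ZUUID         # a single column, via Table.__getattr__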
|
c72fff19a1908e45711fbc199114e6029f29e7ea
|
ed36064525bad62959d9ab739edeea477bf29c1c
|
/2017-11-09-defcamp-final/hack_tac_toe/toe.py
|
218cfc549bad49c2fda7179daac3c3a9c5f59856
|
[] |
no_license
|
p4-team/ctf
|
2dae496622c8403d7539b21f0e9a286e9889195a
|
8280caff137e42b26cb55f2c62411c7c512088de
|
refs/heads/master
| 2023-08-12T03:21:31.021612
| 2023-04-26T23:57:29
| 2023-04-26T23:57:29
| 42,933,477
| 1,899
| 366
| null | 2022-06-07T21:51:40
| 2015-09-22T12:53:15
|
Python
|
UTF-8
|
Python
| false
| false
| 724
|
py
|
toe.py
|
import requests
from crypto_commons.generic import xor_string
s = requests.Session()
def get_state():
return requests.utils.unquote(s.cookies['Encrypted_Game_Session']).decode('base64')
def main():
s.get('http://hacktactoe.dctf-f1nals-2017.def.camp/action.php?action=init')
original_state = get_state()
pt = 'a' * 100
s.get('http://hacktactoe.dctf-f1nals-2017.def.camp/action.php?name=' + pt)
state_with_long_name = get_state()
matching_block = state_with_long_name[112:112 + len(pt)]
keystream = xor_string(matching_block, pt)
print(xor_string(original_state[112:], keystream))
print(keystream.encode("hex"))
print(xor_string(original_state, keystream[:32] * 10))
main()
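# Self-contained illustration of the keystream-reuse attack above (toy values,
# not real server data): with a stream cipher, ciphertext = plaintext XOR
# keystream, so a chosen plaintext exposes the keystream, which then decrypts
# the same positions of any other ciphertext.
def _xor_str(a, b):
    return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))

_ks = '\x13\x37\xca\xfe' * 3
_known = 'a' * 12
_ct_known = _xor_str(_known, _ks)            # observed for chosen input
_ct_secret = _xor_str('flag{stream}', _ks)   # target ciphertext
_recovered = _xor_str(_ct_known, _known)     # keystream = ct XOR pt
assert _xor_str(_ct_secret, _recovered) == 'flag{stream}'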
|
2783b262c575062cbc56a87934da3fbabb65661f
|
a2b429075098ef615a104845b8434e7fdeff9d14
|
/antspynet/utilities/histology.py
|
e5ceb71c683323adbd46242a27632aa93adf3026
|
[
"Apache-2.0"
] |
permissive
|
ANTsX/ANTsPyNet
|
de95ec1ceca6bd146b99127c36273ba4649be40b
|
1703acb58ed053ce3348aa061e4087bac953dd07
|
refs/heads/master
| 2023-08-09T17:26:33.179674
| 2023-08-04T14:22:18
| 2023-08-04T14:22:18
| 189,067,098
| 171
| 36
|
Apache-2.0
| 2023-07-13T15:57:34
| 2019-05-28T16:44:24
|
Python
|
UTF-8
|
Python
| false
| false
| 28,682
|
py
|
histology.py
|
import numpy as np
import ants
import warnings
def arterial_lesion_segmentation(image,
antsxnet_cache_directory=None,
verbose=False):
"""
Perform arterial lesion segmentation using U-net.
Arguments
---------
image : ANTsImage
input image
antsxnet_cache_directory : string
Destination directory for storing the downloaded template and model weights.
        Since these can be reused, if None, the data will be downloaded to
~/.keras/ANTsXNet/.
verbose : boolean
Print progress to the screen.
Returns
-------
Foreground probability image.
Example
-------
>>> output = arterial_lesion_segmentation(histology_image)
"""
from ..architectures import create_unet_model_2d
from ..utilities import get_pretrained_network
if image.dimension != 2:
raise ValueError( "Image dimension must be 2." )
channel_size = 1
weights_file_name = get_pretrained_network("arterialLesionWeibinShi",
antsxnet_cache_directory=antsxnet_cache_directory)
resampled_image_size = (512, 512)
unet_model = create_unet_model_2d((*resampled_image_size, channel_size),
number_of_outputs=1, mode="sigmoid",
number_of_filters=(64, 96, 128, 256, 512),
convolution_kernel_size=(3, 3), deconvolution_kernel_size=(2, 2),
dropout_rate=0.0, weight_decay=0,
additional_options=("initialConvolutionKernelSize[5]", "attentionGating"))
unet_model.load_weights(weights_file_name)
if verbose == True:
print("Preprocessing: Resampling and N4 bias correction.")
preprocessed_image = ants.image_clone(image)
preprocessed_image = preprocessed_image / preprocessed_image.max()
preprocessed_image = ants.resample_image(preprocessed_image, resampled_image_size, use_voxels=True, interp_type=0)
mask = ants.image_clone(preprocessed_image) * 0 + 1
preprocessed_image = ants.n4_bias_field_correction(preprocessed_image, mask=mask, shrink_factor=2, return_bias_field=False, verbose=verbose)
batchX = np.expand_dims(preprocessed_image.numpy(), axis=0)
batchX = np.expand_dims(batchX, axis=-1)
batchX = (batchX - batchX.min()) / (batchX.max() - batchX.min())
predicted_data = unet_model.predict(batchX, verbose=int(verbose))
origin = preprocessed_image.origin
spacing = preprocessed_image.spacing
direction = preprocessed_image.direction
foreground_probability_image = ants.from_numpy(np.squeeze(predicted_data[0, :, :, 0]),
origin=origin, spacing=spacing, direction=direction)
if verbose == True:
print("Post-processing: resampling to original space.")
foreground_probability_image = ants.resample_image_to_target(foreground_probability_image, image)
return(foreground_probability_image)
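# A minimal usage sketch (assumptions: antspynet is installed as a package and
# "slide.jpg" is a hypothetical 2-D histology image readable by ants.image_read):
#     import ants
#     from antspynet.utilities import arterial_lesion_segmentation
#     img = ants.image_read("slide.jpg", dimension=2)
#     prob = arterial_lesion_segmentation(img, verbose=True)
#     mask = ants.threshold_image(prob, 0.5, 1.0)  # binarize the probability map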
def allen_ex5_brain_extraction(image,
view = "sagittal",
which_axis=2,
antsxnet_cache_directory=None,
verbose=False):
"""
    Perform brain extraction of Allen's E13.5 and E15.5 mouse embryonic data.
Arguments
---------
image : ANTsImage
input image
view : string
Two trained networks are available: "coronal" or "sagittal".
which_axis : integer
If 3-D image, which_axis specifies the direction of the "view".
antsxnet_cache_directory : string
Destination directory for storing the downloaded template and model weights.
        Since these can be reused, if this is None, the data will be downloaded
        to ~/.keras/ANTsXNet/.
verbose : boolean
Print progress to the screen.
Returns
-------
Foreground probability image.
Example
-------
    >>> output = allen_ex5_brain_extraction(histology_image)
"""
from ..architectures import create_unet_model_2d
from ..utilities import get_pretrained_network
if which_axis < 0 or which_axis > 2:
raise ValueError("Chosen axis not supported.")
weights_file_name = ""
if view.lower() == "coronal":
weights_file_name = get_pretrained_network("ex5_coronal_weights",
antsxnet_cache_directory=antsxnet_cache_directory)
elif view.lower() == "sagittal":
weights_file_name = get_pretrained_network("ex5_sagittal_weights",
antsxnet_cache_directory=antsxnet_cache_directory)
else:
raise ValueError("Valid view options are coronal and sagittal.")
resampled_image_size = (512, 512)
original_slice_shape = image.shape
if image.dimension > 2:
original_slice_shape = tuple(np.delete(np.array(image.shape), which_axis))
unet_model = create_unet_model_2d((*resampled_image_size, 1),
number_of_outputs=2, mode="classification",
number_of_filters=(64, 96, 128, 256, 512),
convolution_kernel_size=(3, 3), deconvolution_kernel_size=(2, 2),
dropout_rate=0.0, weight_decay=0,
additional_options=("initialConvolutionKernelSize[5]", "attentionGating"))
unet_model.load_weights(weights_file_name)
if verbose:
print("Preprocessing: Resampling.")
number_of_channels = image.components
number_of_slices = 1
if image.dimension > 2:
number_of_slices = image.shape[which_axis]
image_channels = list()
if number_of_channels == 1:
image_channels.append(image)
else:
image_channels = ants.split_channels(image)
batch_X = np.zeros((number_of_channels * number_of_slices, *resampled_image_size, 1))
count = 0
for i in range(number_of_channels):
image_channel_array = image_channels[i].numpy()
for j in range(number_of_slices):
slice = None
if image.dimension > 2:
if which_axis == 0:
image_channel_slice_array = np.squeeze(image_channel_array[j,:,:])
elif which_axis == 1:
image_channel_slice_array = np.squeeze(image_channel_array[:,j,:])
else:
image_channel_slice_array = np.squeeze(image_channel_array[:,:,j])
slice = ants.from_numpy(image_channel_slice_array)
else:
slice = image_channels[i]
if slice.max() > slice.min():
slice_resampled = ants.resample_image(slice, resampled_image_size, use_voxels=True, interp_type=0)
slice_array = slice_resampled.numpy()
slice_array = (slice_array - slice_array.min()) / (slice_array.max() - slice_array.min())
batch_X[count,:,:,0] = slice_array
count = count + 1
if verbose:
print("Prediction: ")
predicted_data = unet_model.predict(batch_X, verbose=int(verbose))
if number_of_channels > 1:
if verbose:
print("Averaging across channels.")
predicted_data_temp = np.split(predicted_data, number_of_channels, axis=0)
        predicted_data = np.zeros((number_of_slices, *resampled_image_size, 2))  # two softmax outputs
for i in range(number_of_channels):
predicted_data = (predicted_data * i + predicted_data_temp[i]) / (i + 1)
if verbose:
print("Post-processing: resampling to original space.")
foreground_probability_array = np.zeros(image.shape)
for j in range(number_of_slices):
slice_resampled = ants.from_numpy(np.squeeze(predicted_data[j,:,:,1]))
slice = ants.resample_image(slice_resampled, original_slice_shape, use_voxels=True, interp_type=0)
if image.dimension == 2:
foreground_probability_array[:,:] = slice.numpy()
else:
if which_axis == 0:
foreground_probability_array[j,:,:] = slice.numpy()
elif which_axis == 1:
foreground_probability_array[:,j,:] = slice.numpy()
else:
foreground_probability_array[:,:,j] = slice.numpy()
origin = image.origin
spacing = image.spacing
direction = image.direction
foreground_probability_image = ants.from_numpy(foreground_probability_array,
origin=origin, spacing=spacing, direction=direction)
return(foreground_probability_image)
def allen_histology_brain_mask(image,
which_axis=2,
antsxnet_cache_directory=None,
verbose=False):
"""
Determine brain foreground of Allen's mouse data.
Arguments
---------
image : ANTsImage
input image
which_axis : integer
If 3-D image, which_axis specifies the direction of the "view".
antsxnet_cache_directory : string
Destination directory for storing the downloaded template and model weights.
        Since these can be reused, if this is None, the data will be downloaded
        to ~/.keras/ANTsXNet/.
verbose : boolean
Print progress to the screen.
Returns
-------
Foreground probability image.
Example
-------
>>> output = allen_histology_brain_mask(histology_image)
"""
from ..architectures import create_unet_model_2d
from ..utilities import get_pretrained_network
if which_axis < 0 or which_axis > 2:
raise ValueError("Chosen axis not supported.")
weights_file_name = get_pretrained_network("allen_brain_mask_weights",
antsxnet_cache_directory=antsxnet_cache_directory)
resampled_image_size = (512, 512)
original_slice_shape = image.shape
if image.dimension > 2:
original_slice_shape = tuple(np.delete(np.array(image.shape), which_axis))
unet_model = create_unet_model_2d((*resampled_image_size, 1),
number_of_outputs=2, mode="classification",
number_of_filters=(64, 96, 128, 256, 512),
convolution_kernel_size=(3, 3), deconvolution_kernel_size=(2, 2),
dropout_rate=0.0, weight_decay=0,
additional_options=("initialConvolutionKernelSize[5]", "attentionGating"))
unet_model.load_weights(weights_file_name)
if verbose:
print("Preprocessing: Resampling.")
number_of_channels = image.components
number_of_slices = 1
if image.dimension > 2:
number_of_slices = image.shape[which_axis]
image_channels = list()
if number_of_channels == 1:
image_channels.append(image)
else:
image_channels = ants.split_channels(image)
batch_X = np.zeros((number_of_channels * number_of_slices, *resampled_image_size, 1))
count = 0
for i in range(number_of_channels):
image_channel_array = image_channels[i].numpy()
for j in range(number_of_slices):
slice = None
if image.dimension > 2:
if which_axis == 0:
image_channel_slice_array = np.squeeze(image_channel_array[j,:,:])
elif which_axis == 1:
image_channel_slice_array = np.squeeze(image_channel_array[:,j,:])
else:
image_channel_slice_array = np.squeeze(image_channel_array[:,:,j])
slice = ants.from_numpy(image_channel_slice_array)
else:
slice = image_channels[i]
if slice.max() > slice.min():
slice_resampled = ants.resample_image(slice, resampled_image_size, use_voxels=True, interp_type=0)
slice_array = slice_resampled.numpy()
slice_array = (slice_array - slice_array.min()) / (slice_array.max() - slice_array.min())
batch_X[count,:,:,0] = slice_array
count = count + 1
if verbose:
print("Prediction: ")
predicted_data = unet_model.predict(batch_X, verbose=int(verbose))
if number_of_channels > 1:
if verbose:
print("Averaging across channels.")
predicted_data_temp = np.split(predicted_data, number_of_channels, axis=0)
        predicted_data = np.zeros((number_of_slices, *resampled_image_size, 2))  # two softmax outputs
for i in range(number_of_channels):
predicted_data = (predicted_data * i + predicted_data_temp[i]) / (i + 1)
if verbose:
print("Post-processing: resampling to original space.")
foreground_probability_array = np.zeros(image.shape)
for j in range(number_of_slices):
slice_resampled = ants.from_numpy(np.squeeze(predicted_data[j,:,:,1]))
slice = ants.resample_image(slice_resampled, original_slice_shape, use_voxels=True, interp_type=0)
if image.dimension == 2:
foreground_probability_array[:,:] = slice.numpy()
else:
if which_axis == 0:
foreground_probability_array[j,:,:] = slice.numpy()
elif which_axis == 1:
foreground_probability_array[:,j,:] = slice.numpy()
else:
foreground_probability_array[:,:,j] = slice.numpy()
origin = image.origin
spacing = image.spacing
direction = image.direction
foreground_probability_image = ants.from_numpy(foreground_probability_array,
origin=origin, spacing=spacing, direction=direction)
return(foreground_probability_image)
def allen_histology_hemispherical_coronal_mask(image,
which_axis=2,
antsxnet_cache_directory=None,
verbose=False):
"""
Determine left and right hemisphere brain masks of Allen's mouse data in coronal
acquisitions for both P* and E*x5 data. This assumes that the original histology
image has been pre-extracted.
Arguments
---------
image : ANTsImage
input image
which_axis : integer
If 3-D image, which_axis specifies the direction of the coronal view.
antsxnet_cache_directory : string
Destination directory for storing the downloaded template and model weights.
        Since these can be reused, if this is None, the data will be downloaded
        to ~/.keras/ANTsXNet/.
verbose : boolean
Print progress to the screen.
Returns
-------
    Dictionary with the hemispherical segmentation image and the list of
    per-class probability images.
Example
-------
>>> output = allen_histology_hemispherical_coronal_mask(histology_image)
"""
from ..architectures import create_unet_model_2d
from ..utilities import get_pretrained_network
if which_axis < 0 or which_axis > 2:
raise ValueError("Chosen axis not supported.")
weights_file_name = get_pretrained_network("allen_brain_leftright_coronal_mask_weights",
antsxnet_cache_directory=antsxnet_cache_directory)
resampled_image_size = (512, 512)
original_slice_shape = image.shape
if image.dimension > 2:
original_slice_shape = tuple(np.delete(np.array(image.shape), which_axis))
classes = (0, 1, 2)
number_of_classification_labels = len(classes)
unet_model = create_unet_model_2d((*resampled_image_size, 1),
number_of_outputs=number_of_classification_labels, mode="classification",
number_of_filters=(64, 96, 128, 256, 512),
convolution_kernel_size=(3, 3), deconvolution_kernel_size=(2, 2),
dropout_rate=0.0, weight_decay=0,
additional_options=("initialConvolutionKernelSize[5]", "attentionGating"))
unet_model.load_weights(weights_file_name)
if verbose:
print("Preprocessing: Resampling.")
number_of_channels = image.components
number_of_slices = 1
if image.dimension > 2:
number_of_slices = image.shape[which_axis]
image_channels = list()
if number_of_channels == 1:
image_channels.append(image)
else:
image_channels = ants.split_channels(image)
batch_X = np.zeros((number_of_channels * number_of_slices, *resampled_image_size, 1))
count = 0
for i in range(number_of_channels):
image_channel_array = image_channels[i].numpy()
for j in range(number_of_slices):
slice = None
if image.dimension > 2:
if which_axis == 0:
image_channel_slice_array = np.squeeze(image_channel_array[j,:,:])
elif which_axis == 1:
image_channel_slice_array = np.squeeze(image_channel_array[:,j,:])
else:
image_channel_slice_array = np.squeeze(image_channel_array[:,:,j])
slice = ants.from_numpy(image_channel_slice_array)
else:
slice = image_channels[i]
if slice.max() > slice.min():
slice_resampled = ants.resample_image(slice, resampled_image_size, use_voxels=True, interp_type=0)
slice_smoothed = ants.smooth_image(slice_resampled, 1.0)
slice_array = slice_smoothed.numpy()
slice_array = (slice_array - slice_array.min()) / (slice_array.max() - slice_array.min())
batch_X[count,:,:,0] = slice_array
count = count + 1
if verbose:
print("Prediction: ")
predicted_data = unet_model.predict(batch_X, verbose=int(verbose))
if number_of_channels > 1:
if verbose:
print("Averaging across channels.")
predicted_data_temp = np.split(predicted_data, number_of_channels, axis=0)
        predicted_data = np.zeros((number_of_slices, *resampled_image_size, number_of_classification_labels))
for i in range(number_of_channels):
predicted_data = (predicted_data * i + predicted_data_temp[i]) / (i + 1)
if verbose:
print("Post-processing: resampling to original space.")
origin = image.origin
spacing = image.spacing
direction = image.direction
probability_images = list()
for i in range(len(classes)):
if verbose == True:
print("Reconstructing image", classes[i])
probability_image_array = np.zeros(image.shape)
for j in range(number_of_slices):
slice_resampled = ants.from_numpy(np.squeeze(predicted_data[j,:,:,i]))
slice = ants.resample_image(slice_resampled, original_slice_shape, use_voxels=True, interp_type=0)
if image.dimension == 2:
probability_image_array[:,:] = slice.numpy()
else:
if which_axis == 0:
probability_image_array[j,:,:] = slice.numpy()
elif which_axis == 1:
probability_image_array[:,j,:] = slice.numpy()
else:
probability_image_array[:,:,j] = slice.numpy()
probability_images.append(ants.from_numpy(probability_image_array,
origin=origin, spacing=spacing, direction=direction))
image_matrix = ants.image_list_to_matrix(probability_images, image_channels[0] * 0 + 1)
segmentation_matrix = np.argmax(image_matrix, axis=0)
segmentation_image = ants.matrix_to_images(
np.expand_dims(segmentation_matrix, axis=0), image_channels[0] * 0 + 1)[0]
return_dict = {'segmentation_image' : segmentation_image,
'probability_images' : probability_images}
return(return_dict)
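# Usage sketch for the dictionary returned above (hedged: the class ordering
# (0, 1, 2) = (background, left, right) is implied by the weights name):
#     result = allen_histology_hemispherical_coronal_mask(histology_image)
#     seg = result['segmentation_image']            # integer labels 0/1/2
#     left_prob = result['probability_images'][1]   # per-class probability map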
def allen_histology_cerebellum_mask(image,
which_axis=2,
view = 'sagittal',
antsxnet_cache_directory=None,
verbose=False):
"""
Determine cerebellum foreground of Allen's mouse data.
Arguments
---------
image : ANTsImage
input image
which_axis : integer
If 3-D image, which_axis specifies the direction of the "view".
antsxnet_cache_directory : string
Destination directory for storing the downloaded template and model weights.
        Since these can be reused, if this is None, the data will be downloaded
        to ~/.keras/ANTsXNet/.
verbose : boolean
Print progress to the screen.
Returns
-------
Foreground probability image.
Example
-------
>>> output = allen_histology_cerebellum_mask(histology_image)
"""
from ..architectures import create_unet_model_2d
from ..utilities import get_pretrained_network
if which_axis < 0 or which_axis > 2:
raise ValueError("Chosen axis not supported.")
weights_file_name = None
if view == "sagittal":
weights_file_name = get_pretrained_network("allen_cerebellum_sagittal_mask_weights",
antsxnet_cache_directory=antsxnet_cache_directory)
elif view == "coronal":
weights_file_name = get_pretrained_network("allen_cerebellum_coronal_mask_weights",
antsxnet_cache_directory=antsxnet_cache_directory)
else:
raise ValueError("Unrecognized option for view. Must be sagittal or coronal.")
resampled_image_size = (512, 512)
original_slice_shape = image.shape
if image.dimension > 2:
original_slice_shape = tuple(np.delete(np.array(image.shape), which_axis))
unet_model = create_unet_model_2d((*resampled_image_size, 1),
number_of_outputs=1, mode="sigmoid",
number_of_filters=(64, 96, 128, 256, 512),
convolution_kernel_size=(3, 3), deconvolution_kernel_size=(2, 2),
dropout_rate=0.0, weight_decay=0,
additional_options=("initialConvolutionKernelSize[5]", "attentionGating"))
unet_model.load_weights(weights_file_name)
if verbose:
print("Preprocessing: Resampling.")
number_of_channels = image.components
number_of_slices = 1
if image.dimension > 2:
number_of_slices = image.shape[which_axis]
image_channels = list()
if number_of_channels == 1:
image_channels.append(image)
else:
image_channels = ants.split_channels(image)
batch_X = np.zeros((number_of_channels * number_of_slices, *resampled_image_size, 1))
count = 0
for i in range(number_of_channels):
image_channel_array = image_channels[i].numpy()
for j in range(number_of_slices):
slice = None
if image.dimension > 2:
if which_axis == 0:
image_channel_slice_array = np.squeeze(image_channel_array[j,:,:])
elif which_axis == 1:
image_channel_slice_array = np.squeeze(image_channel_array[:,j,:])
else:
image_channel_slice_array = np.squeeze(image_channel_array[:,:,j])
slice = ants.from_numpy(image_channel_slice_array)
else:
slice = image_channels[i]
if slice.max() > slice.min():
slice_resampled = ants.resample_image(slice, resampled_image_size, use_voxels=True, interp_type=0)
slice_array = slice_resampled.numpy()
slice_array = (slice_array - slice_array.min()) / (slice_array.max() - slice_array.min())
batch_X[count,:,:,0] = slice_array
count = count + 1
if verbose:
print("Prediction: ")
predicted_data = unet_model.predict(batch_X, verbose=int(verbose))
if number_of_channels > 1:
if verbose:
print("Averaging across channels.")
predicted_data_temp = np.split(predicted_data, number_of_channels, axis=0)
predicted_data = np.zeros((number_of_slices, *resampled_image_size, 1))
for i in range(number_of_channels):
predicted_data = (predicted_data * i + predicted_data_temp[i]) / (i + 1)
if verbose:
print("Post-processing: resampling to original space.")
foreground_probability_array = np.zeros(image.shape)
for j in range(number_of_slices):
slice_resampled = ants.from_numpy(np.squeeze(predicted_data[j,:,:,0]))
slice = ants.resample_image(slice_resampled, original_slice_shape, use_voxels=True, interp_type=0)
if image.dimension == 2:
foreground_probability_array[:,:] = slice.numpy()
else:
if which_axis == 0:
foreground_probability_array[j,:,:] = slice.numpy()
elif which_axis == 1:
foreground_probability_array[:,j,:] = slice.numpy()
else:
foreground_probability_array[:,:,j] = slice.numpy()
origin = image.origin
spacing = image.spacing
direction = image.direction
foreground_probability_image = ants.from_numpy(foreground_probability_array,
origin=origin, spacing=spacing, direction=direction)
return(foreground_probability_image)
def allen_histology_super_resolution(image,
antsxnet_cache_directory=None,
verbose=False):
"""
Super resolution (2x) of a single image slice (256x256 -> 512x512)
Arguments
---------
image : ANTsImage or ANTsImage list
input image or input image list
antsxnet_cache_directory : string
Destination directory for storing the downloaded template and model weights.
        Since these can be reused, if this is None, the data will be downloaded
        to ~/.keras/ANTsXNet/.
verbose : boolean
Print progress to the screen.
Returns
-------
Super resolution image of size 512x512 voxels (or a list depending on the input)
Example
-------
>>> output = allen_histology_super_resolution(histology_image)
"""
from ..architectures import create_deep_back_projection_network_model_2d
from ..utilities import get_pretrained_network
from ..utilities import regression_match_image
image_list = list()
if isinstance(image, list):
image_list = image
else:
image_list.append(image)
lr_image_size = (256, 256)
sr_image_size = (512, 512)
image_lr_list = list()
for i in range(len(image_list)):
if image_list[i].components != 3:
raise ValueError("Number of image channels should be 3 (rgb).")
if image_list[i].dimension != 2:
raise ValueError("Input image should be 2-D.")
do_resample = False
if image_list[i].shape != lr_image_size:
warnings.warn("Resampling input image to (256, 256).")
do_resample = True
image_lr = ants.image_clone(image_list[i])
image_lr_channels = ants.split_channels(image_lr)
for c in range(len(image_lr_channels)):
if do_resample:
image_lr_channels[c] = ants.resample_image(image_lr_channels[c], resample_params=lr_image_size,
use_voxels=True, interp_type=0)
image_lr_channels[c] = ((image_lr_channels[c] - image_lr_channels[c].min()) /
(image_lr_channels[c].max() - image_lr_channels[c].min()))
image_lr = ants.merge_channels(image_lr_channels)
image_lr_list.append(image_lr)
weights_file_name = get_pretrained_network("allen_sr_weights",
antsxnet_cache_directory=antsxnet_cache_directory)
sr_model = create_deep_back_projection_network_model_2d((*lr_image_size, 3),
number_of_outputs=3, convolution_kernel_size=(6, 6), strides=(2, 2))
sr_model.load_weights(weights_file_name)
batch_X = np.zeros((len(image_lr_list), *lr_image_size, 3))
for i in range(len(image_lr_list)):
batch_X[i,:,:,:] = image_lr_list[i].numpy()
if verbose:
print("Prediction: ")
predicted_data = sr_model.predict(batch_X, verbose=int(verbose))
if verbose:
print("Regression match output image.")
spacing_factor = (lr_image_size[0] - 1) / (sr_image_size[0] - 1)
image_sr_list = list()
for i in range(len(image_lr_list)):
image_sr = ants.from_numpy(predicted_data[i,:,:,:], origin=image_list[i].origin,
direction=image_list[i].direction,
spacing=(spacing_factor * image_list[i].spacing[0],
spacing_factor * image_list[i].spacing[1]),
has_components=True)
image_channels = ants.split_channels(image_list[i])
image_sr_channels = ants.split_channels(image_sr)
for c in range(len(image_sr_channels)):
image_lr_channel_resampled = ants.resample_image(image_channels[c], resample_params=sr_image_size,
use_voxels=True, interp_type=0)
image_sr_channels[c] = regression_match_image(image_sr_channels[c], image_lr_channel_resampled)
image_sr_list.append(ants.merge_channels(image_sr_channels))
if isinstance(image, list):
return(image_sr_list)
else:
return(image_sr_list[0])
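# Usage sketch (assumption: "slide_lr.jpg" is a hypothetical 256x256 RGB slice;
# other sizes are resampled to 256x256 first, per the warning above):
#     import ants
#     lr = ants.image_read("slide_lr.jpg")        # 3-channel, 2-D
#     sr = allen_histology_super_resolution(lr)   # 512x512, regression-matched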
|
9272a2a157e45cd05091ca47448c140237120efe
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayInsDataAutoFraudQueryResponse.py
|
66801e8c42dc4e45c839bad0e1cf26318f653c11
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
AlipayInsDataAutoFraudQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CaseInfoCode import CaseInfoCode
class AlipayInsDataAutoFraudQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayInsDataAutoFraudQueryResponse, self).__init__()
self._fraud_tag = None
self._fraud_tag_level = None
self._info_code_list = None
@property
def fraud_tag(self):
return self._fraud_tag
@fraud_tag.setter
def fraud_tag(self, value):
self._fraud_tag = value
@property
def fraud_tag_level(self):
return self._fraud_tag_level
@fraud_tag_level.setter
def fraud_tag_level(self, value):
self._fraud_tag_level = value
@property
def info_code_list(self):
return self._info_code_list
@info_code_list.setter
def info_code_list(self, value):
if isinstance(value, list):
self._info_code_list = list()
for i in value:
if isinstance(i, CaseInfoCode):
self._info_code_list.append(i)
else:
self._info_code_list.append(CaseInfoCode.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayInsDataAutoFraudQueryResponse, self).parse_response_content(response_content)
if 'fraud_tag' in response:
self.fraud_tag = response['fraud_tag']
if 'fraud_tag_level' in response:
self.fraud_tag_level = response['fraud_tag_level']
if 'info_code_list' in response:
self.info_code_list = response['info_code_list']
|
b25fbfba1f850a24282b97fb977d242372dc8181
|
beab4b9703df6c4e9bda54fada11a6d985ea2c5a
|
/tests/test_json_encoding.py
|
f581b3e383e800751390d2b41ad253456fa430f6
|
[
"MIT"
] |
permissive
|
sanic-org/sanic
|
d3db62482914061a1f6a8f7d94b6127c2876cb3e
|
47215d4635184bdfb1d5cff000d19390f19219ab
|
refs/heads/main
| 2023-09-05T01:04:31.432228
| 2023-08-30T17:03:22
| 2023-08-30T17:03:22
| 59,720,190
| 3,523
| 439
|
MIT
| 2023-09-14T05:45:11
| 2016-05-26T04:38:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
test_json_encoding.py
|
import sys
from dataclasses import asdict, dataclass
from functools import partial
from json import dumps as sdumps
from string import ascii_lowercase
from typing import Dict
import pytest
try:
import ujson
from ujson import dumps as udumps
ujson_version = tuple(
map(int, ujson.__version__.strip(ascii_lowercase).split("."))
)
NO_UJSON = False
DEFAULT_DUMPS = udumps
except ModuleNotFoundError:
NO_UJSON = True
DEFAULT_DUMPS = partial(sdumps, separators=(",", ":"))
ujson_version = None
from sanic import Sanic
from sanic.response import BaseHTTPResponse, json
@dataclass
class Foo:
bar: str
def __json__(self):
return udumps(asdict(self))
@pytest.fixture
def foo():
return Foo(bar="bar")
@pytest.fixture
def payload(foo: Foo):
return {"foo": foo}
@pytest.fixture(autouse=True)
def default_back_to_ujson():
yield
BaseHTTPResponse._dumps = DEFAULT_DUMPS
def test_change_encoder():
Sanic("Test", dumps=sdumps)
assert BaseHTTPResponse._dumps == sdumps
def test_change_encoder_to_some_custom():
def my_custom_encoder():
return "foo"
Sanic("Test", dumps=my_custom_encoder)
assert BaseHTTPResponse._dumps == my_custom_encoder
@pytest.mark.skipif(NO_UJSON is True, reason="ujson not installed")
def test_json_response_ujson(payload: Dict[str, Foo]):
"""ujson will look at __json__"""
response = json(payload)
assert response.body == b'{"foo":{"bar":"bar"}}'
with pytest.raises(
TypeError, match="Object of type Foo is not JSON serializable"
):
json(payload, dumps=sdumps)
Sanic("Test", dumps=sdumps)
with pytest.raises(
TypeError, match="Object of type Foo is not JSON serializable"
):
json(payload)
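# The __json__ hook exercised above is a ujson extension point: when ujson
# serializes an object exposing __json__, it embeds the returned string
# verbatim instead of raising TypeError. A minimal sketch (assumes ujson):
#     class Point:
#         def __init__(self, x):
#             self.x = x
#         def __json__(self):
#             return '{"x": %d}' % self.x
#     udumps({"p": Point(1)})  # -> '{"p":{"x":1}}'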
@pytest.mark.skipif(
NO_UJSON is True or ujson_version >= (5, 4, 0),
reason=(
"ujson not installed or version is 5.4.0 or newer, "
"which can handle arbitrary size integers"
),
)
def test_json_response_json():
"""One of the easiest ways to tell the difference is that ujson cannot
serialize over 64 bits"""
too_big_for_ujson = 111111111111111111111
with pytest.raises(OverflowError, match="int too big to convert"):
json(too_big_for_ujson)
response = json(too_big_for_ujson, dumps=sdumps)
assert sys.getsizeof(response.body) == 54
Sanic("Test", dumps=sdumps)
response = json(too_big_for_ujson)
assert sys.getsizeof(response.body) == 54
|
136287c7713b128c11e4ebde004e1029259a80dd
|
d068d41e02ab116cbd83ee9298c9ba357c668f85
|
/ipypublish/tests/__init__.py
|
ea0d1483f847ae8608c5d8e49343c09e06ae1550
|
[
"BSD-3-Clause"
] |
permissive
|
chrisjsewell/ipypublish
|
01f362cdf0989e119111a089bb307f52e23c1ef7
|
53fa92c4c7f18e36d8a9790b10de27219882f4e4
|
refs/heads/develop
| 2022-02-08T04:26:32.081511
| 2020-08-14T01:18:09
| 2020-08-14T01:18:09
| 96,322,423
| 233
| 42
|
BSD-3-Clause
| 2021-11-20T18:58:33
| 2017-07-05T13:29:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
__init__.py
|
import os
TEST_FILES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_files")
TEST_PIC_PATH = os.path.join(TEST_FILES_DIR, "example.jpg")
|
819fef3c3d4ec30dbcae644781dcdb4ae3d7a3dd
|
c89fe3e0595cffefda18e12a28e7aa09faadf783
|
/igvc_perception/src/train_eval/IGVCDataset.py
|
bc8396dd2e05af3c173f6da2228fb354ea4f819d
|
[
"MIT"
] |
permissive
|
RoboJackets/igvc-software
|
5923413dba07d0839628739b380cb08a3a158f57
|
bd65158fb92d75cae4c6fea9cd330291e12701fc
|
refs/heads/master
| 2022-08-19T16:49:15.798203
| 2022-07-31T00:18:59
| 2022-07-31T00:18:59
| 16,996,386
| 111
| 159
|
MIT
| 2022-07-31T00:19:00
| 2014-02-19T19:44:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,181
|
py
|
IGVCDataset.py
|
import cv2
import datetime
import glob
import ntpath
import numpy as np
import os
import pickle
from PIL import Image
import pdb
import random
import torch.utils.data as data
class IGVCDataset(data.Dataset):
def __init__(
self,
root,
im_size,
split="train",
transform=None,
val_samples=1,
preprocessor=None,
):
self.root = root
self.transform = transform
self.split = split
self.im_size = im_size
self.val_samples = val_samples
self.preprocessor = preprocessor
# Root contains a list of images to be used for the dataset.
with open(root, "r") as file:
self.lines = file.read().splitlines()
if self.split in ["train", "val"]:
random.seed(4)
random.shuffle(self.lines)
self.get_paths()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.split == "train":
img_path, target_path = self.train_data[index], self.train_labels[index]
elif self.split == "val":
img_path, target_path = self.val_data[index], self.val_labels[index]
elif self.split == "test":
img_path, target_path = self.test_data[index], self.test_labels[index]
        # TODO: Make annotation grayscale so we don't need this hardcoded layer.
        try:
            img = cv2.imread(img_path)
        except Exception:  # drop into the debugger if an image fails to load
            pdb.set_trace()
if self.preprocessor is not None:
img = self.preprocessor(img)
target = cv2.imread(target_path)
        # randomly flip the image and target horizontally (cv2.flip with
        # flipCode=1 flips around the vertical axis) half of the time
if np.random.randint(2) == 1:
img = cv2.flip(img, 1)
target = cv2.flip(target, 1)
# Access to red color only (OpenCV: BGR)
# This is because the lines are categorized as red color
# And we are interested in lines for the line detection.
target = target[:, :, 2]
img = cv2.resize(img, (self.im_size[1], self.im_size[2]))
target = cv2.resize(target, (self.im_size[1], self.im_size[2]))
target[target != 0] = 255
# cv2.imshow("original image", img)
# cv2.imshow("target", target)
        # # Check that the target is correctly displayed as a binary image
# cv2.waitKey(0)
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
target = Image.fromarray(target)
if self.transform is not None:
img = self.transform(img)
target = self.transform(target)
return img, target
def __len__(self):
if self.split == "train":
return len(self.train_data)
elif self.split == "val":
return len(self.val_data)
elif self.split == "test":
return len(self.test_data)
def get_paths(self):
print("Identifying %s dataset." % self.split)
data = []
labels = []
# Get the corresponding label for each image.
for line in self.lines:
imgpath = line
img_filename = ntpath.basename(imgpath)
anno_filename = img_filename.replace("jpg", "png")
labpath = imgpath.replace("imgs", "annos").replace(
img_filename, anno_filename
)
if not os.path.exists(labpath):
print("Could not find label for %s." % imgpath)
continue
data.append(imgpath)
labels.append(labpath)
if self.split in ["train", "val"]:
self.train_data = data
self.train_labels = labels
self.val_data = self.train_data[-self.val_samples :]
self.val_labels = self.train_labels[-self.val_samples :]
self.train_data = self.train_data[: -self.val_samples]
self.train_labels = self.train_labels[: -self.val_samples]
else:
self.test_data = data
self.test_labels = labels
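# Usage sketch (hedged: "train.txt" is a hypothetical file listing one image
# path per line; im_size is (channels, width, height) as consumed by the
# cv2.resize calls in __getitem__):
#     from torchvision import transforms
#     dataset = IGVCDataset("train.txt", im_size=(3, 320, 320), split="train",
#                           transform=transforms.ToTensor(), val_samples=10)
#     loader = data.DataLoader(dataset, batch_size=8, shuffle=True)
#     imgs, targets = next(iter(loader))  # imgs: (8, 3, 320, 320)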
|
29b294a6215575a94baa392fda3e6025ae7c8f32
|
b06340ae3dfcb551bacefa362c034b064809fd28
|
/src/pytest_check/pseudo_traceback.py
|
ccbf1d84d6a0700b0de8aea544f3f3ff892bb42c
|
[
"MIT"
] |
permissive
|
okken/pytest-check
|
cd3b82ae31932d54550822abb6cc96fa6b4e7c88
|
c7e7741e4d5665a07b0985932acc484aac2d5095
|
refs/heads/main
| 2023-08-19T09:10:40.776832
| 2023-08-11T20:44:36
| 2023-08-11T20:44:36
| 108,791,429
| 282
| 35
|
MIT
| 2023-08-11T20:37:17
| 2017-10-30T02:22:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,567
|
py
|
pseudo_traceback.py
|
import inspect
import os
import re
from pprint import pformat
_traceback_style = "auto"
def get_full_context(frame):
(_, filename, line, funcname, contextlist) = frame[0:5]
locals = frame.frame.f_locals
tb_hide = locals.get("__tracebackhide__", False)
try:
filename = os.path.relpath(filename)
except ValueError: # pragma: no cover
# this is necessary if we're tracing to a different drive letter
# such as C: to D:
#
# Turning off coverage for abspath, for now,
# since that path requires testing with an odd setup.
# But.... we'll keep looking for a way to test it. :)
filename = os.path.abspath(filename) # pragma: no cover
context = contextlist[0].strip() if contextlist else ""
return (filename, line, funcname, context, locals, tb_hide)
COLOR_RED = "\x1b[31m"
COLOR_RESET = "\x1b[0m"
def reformat_raw_traceback(lines, color):
formatted = []
for line in lines:
if 'Traceback (most recent call last)' in line:
continue
if 'AssertionError' in line:
if color:
line = f"{COLOR_RED}{line}{COLOR_RESET}"
formatted.append(line)
continue
result = re.search(r'File "(.*)", line (.*), in (\w*)$\n\W*(.*)',
line, flags=re.MULTILINE)
if result:
file_path, line_no, func_name, context = result.groups()
file_name = os.path.basename(file_path)
if color:
file_name = f"{COLOR_RED}{file_name}{COLOR_RESET}"
#formatted.append(f'{file_name}:{line_no} in {func_name}\n {context}')
formatted.append(f'{file_name}:{line_no} in {func_name} -> {context}')
else:
# I don't have a test case to hit this clause yet
# And I can't think of one.
# But it feels weird to not have the if/else.
# Thus the "no cover"
formatted.append(line) # pragma: no cover
return '\n'.join(formatted)
def _build_pseudo_trace_str(showlocals, tb, color):
"""
    Build a pseudo-traceback string for a nicer error message.
    The only _traceback_style value given special handling here is "no",
    which suppresses the pseudo traceback entirely.
"""
if _traceback_style == "no":
return ""
skip_own_frames = 3
pseudo_trace = []
func = ""
if tb:
pseudo_trace.append(reformat_raw_traceback(tb, color))
context_stack = inspect.stack()[skip_own_frames:]
while "test_" not in func and context_stack:
full_context = get_full_context(context_stack.pop(0))
(file, line, func, context, locals, tb_hide) = full_context
# we want to trace through user code, not 3rd party or builtin libs
if "site-packages" in file:
break
# if called outside of a test, we might hit this
if "<module>" in func:
break
if tb_hide:
continue
if showlocals:
for name, val in reversed(locals.items()):
if not name.startswith('@py'):
pseudo_trace.append("%-10s = %s" % (name, pformat(val,
sort_dicts=False,
compact=True)))
file = os.path.basename(file)
if color:
file = f"{COLOR_RED}{file}{COLOR_RESET}"
#line = f"{file}:{line} in {func}\n {context}"
line = f"{file}:{line} in {func}() -> {context}"
pseudo_trace.append(line)
return "\n".join(reversed(pseudo_trace)) + "\n"
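# Illustrative before/after for reformat_raw_traceback (made-up input, matching
# the f-string format above):
#   in : File "/home/user/test_foo.py", line 3, in test_foo\n    assert 1 == 2
#   out: test_foo.py:3 in test_foo -> assert 1 == 2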
|
df370e9814db36a73d8852a45b2a0fb4a2d872f1
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/perf/core/perfetto_binary_roller/binary_deps_manager.py
|
1619dcb78a5262e9de6cefa6644012281eca136c
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 9,519
|
py
|
binary_deps_manager.py
|
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import posixpath
import stat
import subprocess
import sys
import tempfile
import six
# When py_utils from the catapult repo are not available (as is the case
# on autorollers), import a small replacement.
try:
from py_utils import cloud_storage
except ImportError:
from core.perfetto_binary_roller import cloud_storage
# Binaries are publicly readable, data files are for internal use only.
BINARY_BUCKET = cloud_storage.PUBLIC_BUCKET
BINARY_CS_FOLDER = 'perfetto_binaries'
DATA_BUCKET = cloud_storage.INTERNAL_BUCKET
DATA_CS_FOLDER = 'perfetto_data'
LATEST_FILENAME = 'latest'
# This is the bucket where Perfetto LUCI prebuilts are stored
PERFETTO_BINARY_BUCKET = 'perfetto-luci-artifacts'
PLATFORM_TO_PERFETTO_FOLDER = {
'linux': 'linux-amd64',
'linux_arm': 'linux-arm',
'linux_arm64': 'linux-arm64',
'mac': 'mac-amd64',
'mac_arm': 'mac-arm',
'mac_arm64': 'mac-arm64',
'win': 'windows-amd64',
}
LOCAL_STORAGE_FOLDER = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'bin'))
CONFIG_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'binary_deps.json'))
def _IsRunningOnCrosDevice():
"""Returns True if we're on a ChromeOS device."""
lsb_release = '/etc/lsb-release'
if sys.platform.startswith('linux') and os.path.exists(lsb_release):
with open(lsb_release, 'r') as f:
res = f.read()
if res.count('CHROMEOS_RELEASE_NAME'):
return True
return False
def _GetHostOsName():
if _IsRunningOnCrosDevice():
return 'chromeos'
if sys.platform.startswith('linux'):
return 'linux'
if sys.platform == 'darwin':
return 'mac'
if sys.platform == 'win32':
return 'win'
return None
def _GetHostArch():
uname_arch = six.ensure_str(subprocess.check_output(['uname', '-m']).strip())
if uname_arch == 'armv7l':
return 'arm'
if uname_arch == 'aarch64':
return 'arm64'
return uname_arch
def _GetLinuxBinaryArch(binary_name):
file_output = six.ensure_str(subprocess.check_output(['file', binary_name]))
file_arch = file_output.split(',')[1].strip()
if file_arch == 'x86-64':
return 'x86_64'
if file_arch == 'ARM':
return 'arm'
if file_arch == 'ARM aarch64':
return 'arm64'
return file_arch
def _GetMacBinaryArch(binary_name):
file_output = six.ensure_str(subprocess.check_output(['file', binary_name]))
return file_output.split()[-1].strip()
def _GetHostPlatform():
os_name = _GetHostOsName()
# If we're running directly on a Chrome OS device, fetch the binaries for
# linux instead, which should be compatible with CrOS.
if os_name in ['chromeos', 'linux']:
arch = _GetHostArch()
if arch == 'x86_64':
return 'linux'
return 'linux_' + arch
if os_name == 'mac':
return 'mac_arm64' if _GetHostArch() == 'arm64' else 'mac'
return os_name
def _GetBinaryPlatform(binary_name):
host_platform = _GetHostPlatform()
# Binaries built on linux may be for linux or chromeos on different
# architectures.
if host_platform.startswith('linux'):
arch = _GetLinuxBinaryArch(binary_name)
if arch == 'x86_64':
return 'linux'
return 'linux' + '_' + arch
# Binaries built on mac may be either for arm64 or intel.
if host_platform.startswith('mac'):
arch = _GetMacBinaryArch(binary_name)
if arch == 'x86_64':
return 'mac'
return 'mac' + '_' + arch
# Binaries built on windows are for windows intel always.
return host_platform
def _CalculateHash(full_remote_path):
bucket, remote_path = full_remote_path.split('/', 1)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.close()
cloud_storage.Get(bucket, remote_path, f.name)
return cloud_storage.CalculateHash(f.name)
def _SetLatestPathForBinaryChromium(binary_name, platform, latest_path):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as latest_file:
latest_file.write(latest_path)
latest_file.close()
remote_latest_file = posixpath.join(BINARY_CS_FOLDER, binary_name, platform,
LATEST_FILENAME)
cloud_storage.Insert(BINARY_BUCKET,
remote_latest_file,
latest_file.name,
publicly_readable=True)
def UploadHostBinaryChromium(binary_name, binary_path, version):
"""Upload the binary to the cloud.
This function uploads the host binary (e.g. trace_processor_shell) to the
cloud and updates the 'latest' file for the host platform to point to the
newly uploaded file. Note that it doesn't modify the config and so doesn't
affect which binaries will be downloaded by FetchHostBinary.
"""
filename = os.path.basename(binary_path)
platform = _GetBinaryPlatform(binary_path)
remote_path = posixpath.join(BINARY_CS_FOLDER, binary_name, platform, version,
filename)
if not cloud_storage.Exists(BINARY_BUCKET, remote_path):
cloud_storage.Insert(BINARY_BUCKET,
remote_path,
binary_path,
publicly_readable=True)
_SetLatestPathForBinaryChromium(binary_name, platform, remote_path)
def GetLatestFullPathChromium(binary_name, platform):
with tempfile.NamedTemporaryFile(delete=False) as latest_file:
latest_file.close()
remote_path = posixpath.join(BINARY_CS_FOLDER, binary_name, platform,
LATEST_FILENAME)
cloud_storage.Get(BINARY_BUCKET, remote_path, latest_file.name)
with open(latest_file.name) as latest:
return posixpath.join(BINARY_BUCKET, latest.read())
def GetLatestFullPathPerfetto(binary_name, platform):
path_wildcard = ('*/%s/%s' %
(PLATFORM_TO_PERFETTO_FOLDER[platform], binary_name))
path_list = cloud_storage.ListFiles(PERFETTO_BINARY_BUCKET,
path_wildcard,
sort_by='time')
if not path_list:
raise RuntimeError('No pre-built binary found for platform %s.' % platform)
return PERFETTO_BINARY_BUCKET + path_list[-1]
def GetCurrentFullPath(binary_name, platform):
with open(CONFIG_PATH) as f:
config = json.load(f)
return config[binary_name][platform]['full_remote_path']
def SwitchBinaryToNewFullPath(binary_name, platform, new_full_path):
"""Switch the binary version in use to the latest one.
This function updates the config file to contain the path to the latest
available binary version. This will make FetchHostBinary download the latest
file.
"""
with open(CONFIG_PATH) as f:
config = json.load(f)
config.setdefault(binary_name,
{}).setdefault(platform,
{})['full_remote_path'] = new_full_path
config.setdefault(binary_name,
{}).setdefault(platform,
{})['hash'] = _CalculateHash(new_full_path)
with open(CONFIG_PATH, 'w') as f:
json.dump(config, f, indent=4, separators=(',', ': '))
def FetchHostBinary(binary_name):
"""Download the binary from the cloud.
This function fetches the binary for the host platform from the cloud.
The cloud path is read from the config.
"""
with open(CONFIG_PATH) as f:
config = json.load(f)
platform = _GetHostPlatform()
full_remote_path = config[binary_name][platform]['full_remote_path']
bucket, remote_path = full_remote_path.split('/', 1)
expected_hash = config[binary_name][platform]['hash']
filename = posixpath.basename(remote_path)
local_path = os.path.join(LOCAL_STORAGE_FOLDER, filename)
cloud_storage.Get(bucket, remote_path, local_path)
if cloud_storage.CalculateHash(local_path) != expected_hash:
raise RuntimeError('The downloaded binary has wrong hash.')
mode = os.stat(local_path).st_mode
os.chmod(local_path, mode | stat.S_IXUSR)
return local_path
def FetchDataFile(data_file_name):
"""Download the file from the cloud."""
with open(CONFIG_PATH) as f:
config = json.load(f)
full_remote_path = config[data_file_name]['full_remote_path']
bucket, remote_path = full_remote_path.split('/', 1)
expected_hash = config[data_file_name]['hash']
filename = posixpath.basename(remote_path)
local_path = os.path.join(LOCAL_STORAGE_FOLDER, filename)
cloud_storage.Get(bucket, remote_path, local_path)
if cloud_storage.CalculateHash(local_path) != expected_hash:
raise RuntimeError('The downloaded data file has wrong hash.')
return local_path
def UploadAndSwitchDataFile(data_file_name, data_file_path, version):
"""Upload the script to the cloud and update config to use the new version."""
filename = os.path.basename(data_file_path)
bucket = DATA_BUCKET
remote_path = posixpath.join(DATA_CS_FOLDER, data_file_name, version,
filename)
full_remote_path = posixpath.join(bucket, remote_path)
if not cloud_storage.Exists(DATA_BUCKET, remote_path):
cloud_storage.Insert(bucket,
remote_path,
data_file_path,
publicly_readable=False)
with open(CONFIG_PATH) as f:
config = json.load(f)
config[data_file_name]['full_remote_path'] = full_remote_path
config[data_file_name]['hash'] = cloud_storage.CalculateHash(data_file_path)
with open(CONFIG_PATH, 'w') as f:
json.dump(config, f, indent=4, separators=(',', ': '))
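# Typical roll flow stitched from the functions above (a sketch; it needs
# cloud-storage credentials, and trace_processor_shell is the example binary
# named in the docstrings):
#     latest = GetLatestFullPathPerfetto('trace_processor_shell', 'linux')
#     SwitchBinaryToNewFullPath('trace_processor_shell', 'linux', latest)
#     local_path = FetchHostBinary('trace_processor_shell')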
|
144d2b09984c1c547be8b15c58605d061c6baf12
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/s3prl/utility/check_hub.py
|
a5d879946b1636a274fd543eff525541bba523ca
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
check_hub.py
|
import argparse
import torch
import hubconf
parser = argparse.ArgumentParser()
upstreams = [attr for attr in dir(hubconf) if callable(getattr(hubconf, attr)) and attr[0] != '_']
parser.add_argument('--mode', choices=['list', 'help', 'load'], required=True)
parser.add_argument('--upstream', choices=upstreams)
parser.add_argument('--ckpt', help='The PATH/URL/GOOGLE_DRIVE_ID of upstream checkpoint, not always needed')
parser.add_argument('--config', help='The PATH of upstream config, not always needed')
parser.add_argument('--refresh', action='store_true', help='Whether to re-download upstream contents')
args = parser.parse_args()
if args.mode == 'list':
print(torch.hub.list('s3prl/s3prl', force_reload=args.refresh))
elif args.mode == 'help':
print(torch.hub.help('s3prl/s3prl', args.upstream, force_reload=args.refresh))
elif args.mode == 'load':
print(torch.hub.load(
's3prl/s3prl', args.upstream, force_reload=args.refresh,
ckpt=args.ckpt, config=args.config, refresh=args.refresh
))
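# Example invocations (valid --upstream values depend on what hubconf exposes):
#   python check_hub.py --mode list
#   python check_hub.py --mode help --upstream <upstream_name>
#   python check_hub.py --mode load --upstream <upstream_name> --refresh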
|
077ae4b648c61c16f9583aac26a24941285cfeef
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/server/grr_response_server/export_converters/network.py
|
fd707ca6ac4629ca450bcd720d859fd925ef84af
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,124
|
py
|
network.py
|
#!/usr/bin/env python
"""Classes for exporting network-related data."""
from typing import Iterator, List
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import export_pb2
from grr_response_server.export_converters import base
class ExportedNetworkConnection(rdf_structs.RDFProtoStruct):
protobuf = export_pb2.ExportedNetworkConnection
rdf_deps = [
base.ExportedMetadata,
rdf_client_network.NetworkEndpoint,
]
class ExportedDNSClientConfiguration(rdf_structs.RDFProtoStruct):
protobuf = export_pb2.ExportedDNSClientConfiguration
rdf_deps = [
base.ExportedMetadata,
]
class ExportedNetworkInterface(rdf_structs.RDFProtoStruct):
protobuf = export_pb2.ExportedNetworkInterface
rdf_deps = [
base.ExportedMetadata,
]
class NetworkConnectionToExportedNetworkConnectionConverter(
base.ExportConverter):
"""Converts NetworkConnection to ExportedNetworkConnection."""
input_rdf_type = rdf_client_network.NetworkConnection
def Convert(
self, metadata: base.ExportedMetadata,
conn: rdf_client_network.NetworkConnection
) -> List[ExportedNetworkConnection]:
"""Converts a NetworkConnection into a ExportedNetworkConnection.
Args:
metadata: ExportedMetadata to be added to the ExportedNetworkConnection.
conn: NetworkConnection to be converted.
Returns:
A list with a single ExportedNetworkConnection containing the converted
NetworkConnection.
"""
result = ExportedNetworkConnection(
metadata=metadata,
family=conn.family,
type=conn.type,
local_address=conn.local_address,
remote_address=conn.remote_address,
state=conn.state,
pid=conn.pid,
ctime=conn.ctime)
return [result]
class InterfaceToExportedNetworkInterfaceConverter(base.ExportConverter):
"""Converts Interface to ExportedNetworkInterface."""
input_rdf_type = rdf_client_network.Interface
def Convert(
self, metadata: base.ExportedMetadata,
interface: rdf_client_network.Interface
) -> Iterator[ExportedNetworkInterface]:
"""Converts a Interface into ExportedNetworkInterfaces.
Args:
metadata: ExportedMetadata to be added to the ExportedNetworkInterface.
interface: (Network) Interface to be converted.
Yields:
An ExportedNetworkInterface containing the converted Interface.
"""
ip4_addresses = []
ip6_addresses = []
for addr in interface.addresses:
if addr.address_type == addr.Family.INET:
ip4_addresses.append(addr.human_readable_address)
elif addr.address_type == addr.Family.INET6:
ip6_addresses.append(addr.human_readable_address)
else:
raise ValueError("Invalid address type: %s" % addr.address_type)
result = ExportedNetworkInterface(
metadata=metadata,
ifname=interface.ifname,
ip4_addresses=" ".join(ip4_addresses),
ip6_addresses=" ".join(ip6_addresses))
if interface.mac_address:
result.mac_address = interface.mac_address.human_readable_address
yield result
class DNSClientConfigurationToExportedDNSClientConfiguration(
base.ExportConverter):
"""Converts DNSClientConfiguration to ExportedDNSClientConfiguration."""
input_rdf_type = rdf_client_network.DNSClientConfiguration
def Convert(
self, metadata: base.ExportedMetadata,
config: rdf_client_network.DNSClientConfiguration
) -> Iterator[ExportedDNSClientConfiguration]:
"""Converts a DNSClientConfiguration into a ExportedDNSClientConfiguration.
Args:
metadata: ExportedMetadata to be added to the
ExportedDNSClientConfiguration.
config: DNSClientConfiguration to be converted.
Yields:
An ExportedDNSClientConfiguration containing the DNSClientConfiguration.
"""
result = ExportedDNSClientConfiguration(
metadata=metadata,
dns_servers=" ".join(config.dns_server),
dns_suffixes=" ".join(config.dns_suffix))
yield result
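# Conversion sketch (hedged: `metadata` and `interface` stand for rdfvalue
# instances produced elsewhere in GRR):
#   converter = InterfaceToExportedNetworkInterfaceConverter()
#   for exported in converter.Convert(metadata, interface):
#     print(exported.ifname, exported.ip4_addresses, exported.mac_address)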
|
4595d3f0aa70383df8d225c3f7258f2deef526fe
|
a2b9d660b4bb57d117dfd5b7aae414f547757c49
|
/arknights_/common_operation.py
|
43c11957935d8d6806f8148eeb79f3f7916fa376
|
[
"MIT"
] |
permissive
|
MangetsuC/arkHelper
|
17dd470b73612f7fed87096dbd09b6e766c73f08
|
316d45710adab8d0594e1774d71228fc344be842
|
refs/heads/master
| 2022-12-07T01:53:00.799907
| 2022-06-01T04:52:37
| 2022-06-01T04:52:37
| 243,768,162
| 162
| 18
|
MIT
| 2022-11-22T11:01:24
| 2020-02-28T13:25:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,607
|
py
|
common_operation.py
|
from sys import path as spath
from os import getcwd
from time import sleep
spath.append(getcwd())
from user_res import R
from common2 import adb
from image_.match import match_pic
from image_.color_detect import find_color_block, get_point_color, check_point_color, Color_Point
#adb.ip = '127.0.0.1:7555'
def goto_mainpage():
capture = adb.getScreen_std()
task_pos = match_pic(capture, R.task)
    if task_pos[0] < 0:  # not on the main page
while True:
capture = adb.getScreen_std()
btn_home_pos = match_pic(capture, R.btn_home)
if btn_home_pos[0] > -1:
break
else:
adb.click(10, 10)
sleep(5)
if btn_home_pos[0] > -1:
while True:
adb.click(btn_home_pos[0], btn_home_pos[1])
capture = adb.getScreen_std()
mainpage_pos = match_pic(capture, R.mainpage)
if mainpage_pos[0] > -1:
adb.click(mainpage_pos[0], mainpage_pos[1])
break
while True:
capture = adb.getScreen_std()
task_pos = match_pic(capture, R.task)
if task_pos[0] >-1:
break
def check_exit_mainpage():
for i in range(10):
if match_pic(adb.getScreen_std(), R.btn_back)[0] > -1:
return True
return False
def enter_common(pattern):
while True:
capture = adb.getScreen_std()
task_pos = match_pic(capture, pattern)
if task_pos[0] > -1:
adb.click(task_pos[0], task_pos[1])
if check_exit_mainpage():
break
def enter_task():
enter_common(R.task)
def enter_friends():
enter_common(R.friends)
if __name__ == '__main__':
while True:
capture = adb.getScreen_std()
print(check_point_color(capture, {'772*549': Color_Point(255, 94, 25),
'331*562': Color_Point(255, 255, 255),
'359*558': Color_Point(66, 66, 66),
'742*581': Color_Point(50, 50, 50),
'1195*597': Color_Point(66, 66, 66),
'47*25': Color_Point(255, 255, 255),
'129*47': Color_Point(255, 255, 255),
'194*47': Color_Point(255, 255, 255),
'272*27': Color_Point(255, 255, 255)}))
#goto_mainpage()
|
1d0fdb66b647cb13b85482c8adf952ca15053f8c
|
2dfbca22d0bacf7ba2bb4d270b2d3292f5f8a43b
|
/amulet/level/formats/leveldb_world/interface/chunk/leveldb_8.py
|
cfbc73963022d6886a4679a5a930c475690db31c
|
[] |
no_license
|
Amulet-Team/Amulet-Core
|
9715d888e2faf6c41f9414fd105aaa926aa501c2
|
dafef97fe4fd1f2f713ef1e3503d6b13b20c1c1f
|
refs/heads/main
| 2023-08-18T17:45:48.775423
| 2023-08-16T09:37:55
| 2023-08-16T09:37:55
| 130,729,079
| 117
| 26
| null | 2023-08-16T09:37:57
| 2018-04-23T17:00:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,326
|
py
|
leveldb_8.py
|
from __future__ import annotations
from .leveldb_7 import (
LevelDB7Interface,
)
from typing import Tuple, Dict, Optional, TYPE_CHECKING, List
import struct
import numpy
from numpy.typing import NDArray
from amulet_nbt import NamedTag, CompoundTag, StringTag, IntTag, ShortTag
from amulet.api.block import Block, PropertyDataTypes
from amulet.utils.numpy_helpers import brute_sort_objects_no_hash
from amulet.api.data_types import (
AnyNDArray,
VersionIdentifierTuple,
)
if TYPE_CHECKING:
from amulet.api.chunk.blocks import Blocks
PackedBlockT = Tuple[Tuple[Optional[int], Block], ...]
class LevelDB8Interface(LevelDB7Interface):
chunk_version = 8
def __init__(self):
super().__init__()
self._set_feature("terrain", "2fnpalette")
def _encode_subchunks(
self,
blocks: "Blocks",
palette: AnyNDArray,
bounds: Tuple[int, int],
max_world_version: VersionIdentifierTuple,
) -> Dict[int, Optional[bytes]]:
# Encode sub-chunk block format 8
# TODO: untangle this mess. The lack of typing in numpy is just making this harder.
palette_list: List[PackedBlockT] = list(palette)
min_y = bounds[0] // 16
max_y = bounds[1] // 16
if palette_list:
if palette_list[0][0][0] is None:
air = NamedTag(
CompoundTag(
{
"name": StringTag("minecraft:air"),
"val": ShortTag(0),
}
)
)
else:
air = NamedTag(
CompoundTag(
{
"name": StringTag("minecraft:air"),
"states": CompoundTag({}),
"version": IntTag(17_629_184), # 1, 13, 0, 0
}
)
)
packed_palette: List[Tuple[NamedTag, ...]] = []
for index, block in enumerate(palette_list):
full_block: List[NamedTag] = []
for sub_block_version, sub_block in block:
properties = sub_block.properties
if sub_block_version is None:
block_data = properties.get("block_data", IntTag(0))
if isinstance(block_data, IntTag):
block_data = block_data.py_int
# if block_data >= 16:
# block_data = 0
else:
block_data = 0
sub_block_ = NamedTag(
CompoundTag(
{
"name": StringTag(sub_block.namespaced_name),
"val": ShortTag(block_data),
}
)
)
else:
sub_block_ = NamedTag(
CompoundTag(
{
"name": StringTag(sub_block.namespaced_name),
"states": CompoundTag(
{
key: val
for key, val in properties.items()
if isinstance(val, PropertyDataTypes)
}
),
"version": IntTag(sub_block_version),
}
)
)
full_block.append(sub_block_)
packed_palette.append(tuple(full_block))
chunk = {}
palette_depth = numpy.array([len(block) for block in packed_palette])
for cy in range(min_y, max_y):
if cy in blocks:
palette_index, sub_chunk = numpy.unique(
blocks.get_sub_chunk(cy), return_inverse=True
)
sub_chunk_palette: List[Tuple[NamedTag, ...]] = [
packed_palette[i] for i in palette_index
]
sub_chunk_depth = palette_depth[palette_index].max()
if (
sub_chunk_depth == 1
and len(sub_chunk_palette) == 1
and sub_chunk_palette[0][0].compound.get_string("name").py_str
== "minecraft:air"
):
chunk[cy] = None
else:
# pad block_palette with air in the extra layers
sub_chunk_palette_full: NDArray[NamedTag] = numpy.empty(
(len(sub_chunk_palette), sub_chunk_depth), dtype=object
)
sub_chunk_palette_full.fill(air)
for index, block_tuple in enumerate(sub_chunk_palette):
for sub_index, block in enumerate(block_tuple):
sub_chunk_palette_full[index, sub_index] = block
# should now be a 2D array with an NamedTag in each element
if max_world_version[1] >= (
1,
17,
30,
): # Why do I need to check against game version and not chunk version
sub_chunk_bytes = [
b"\x09",
bytes([sub_chunk_depth]),
struct.pack("b", cy),
]
else:
sub_chunk_bytes = [b"\x08", bytes([sub_chunk_depth])]
for sub_chunk_layer_index in range(sub_chunk_depth):
# TODO: sort out a way to do this quicker without brute forcing it.
(
sub_chunk_layer_palette,
sub_chunk_remap,
) = brute_sort_objects_no_hash(
sub_chunk_palette_full[:, sub_chunk_layer_index]
)
sub_chunk_layer = sub_chunk_remap[sub_chunk.ravel()]
# sub_chunk_layer, sub_chunk_layer_palette = sub_chunk, sub_chunk_palette_full[:, sub_chunk_layer_index]
sub_chunk_bytes.append(
self._save_palette_subchunk(
sub_chunk_layer.reshape(16, 16, 16),
list(sub_chunk_layer_palette.ravel()),
)
)
chunk[cy] = b"".join(sub_chunk_bytes)
else:
chunk[cy] = None
else:
chunk = {i: None for i in range(min_y, max_y)}
return chunk
export = LevelDB8Interface
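# Sub-chunk byte layout emitted by _encode_subchunks above (summary):
#   format 8: [0x08][layer count] then one palettized block storage per layer
#   format 9: [0x09][layer count][cy as a signed byte] then the same storages
# where each storage section is produced by _save_palette_subchunk.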
|
742d40ef15fd00e2494b0cb8aa4c084cb2739bc0
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/keywordCompletion/elseInCondExpr.py
|
4a73af1eb5e0c0a591403f415abafa8833ff4e47
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
elseInCondExpr.py
|
a = 1 if True els<caret>
|
b8de3bf471cc603c22db99a4206ef863fbb0494d
|
0009c76a25c89a0d61d3bc9e10071da58bdfaa5a
|
/py/ztools/lib/Utils.py
|
212ee242fc6307195c274a634ab984487564774e
|
[
"MIT"
] |
permissive
|
julesontheroad/NSC_BUILDER
|
84054e70a80b572088b0806a47ceb398302451b5
|
e9083e83383281bdd9e167d3141163dcc56b6710
|
refs/heads/master
| 2023-07-05T05:23:17.114363
| 2021-11-15T19:34:47
| 2021-11-15T19:34:47
| 149,040,416
| 1,249
| 143
|
MIT
| 2022-12-15T03:19:33
| 2018-09-16T22:18:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,606
|
py
|
Utils.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys
import io
import re
from binascii import hexlify as hx, unhexlify as uhx
from struct import pack as pk, unpack as upk
def memdump(data, length=16, message=''):
assert data, 'Nothing to dump'
dump = []
first = True
while data:
dump.append(message + ' '.join('%02x' % data[n] for n in range(min(length, len(data)))))
data = data[length:]
if first:
message = len(message) * ' '
first = False
return '\n'.join(dump)
def check_tkey(tkey):
return re.match('[a-fA-F0-9]{32}', tkey)
def check_tid(tid):
return re.match('0100[a-fA-F0-9]{12}', tid)
def read_at(fp, off, length):
    fp.seek(off)
    return fp.read(length)
def read_u8(fp, off):
return upk('<B', read_at(fp, off, 1))[0]
def read_u16(fp, off):
return upk('<H', read_at(fp, off, 2))[0]
def read_u32(fp, off):
return upk('<I', read_at(fp, off, 4))[0]
def read_u48(fp, off):
s = upk('<HI', read_at(fp, off, 6))
return s[1] << 16 | s[0]
def read_u64(fp, off):
return upk('<Q', read_at(fp, off, 8))[0]
def pk_u8(nb, endianness='<'):
return pk(endianness+'B', nb)
def pk_u16(nb, endianness='<'):
return pk(endianness+'H', nb)
def pk_u32(nb, endianness='<'):
return pk(endianness+'I', nb)
def pk_u48(nb, endianness='<'):
return pk_u64(nb, endianness=endianness)[:6]
def pk_u64(nb, endianness='<'):
return pk(endianness+'Q', nb)
def pad_to(string, length=0, multiple=0, char=b'\x00'):
if type(string) is str:
string = string.encode()
if length:
if len(string) == length:
return string
assert len(string) < length, 'String is longer than specified length'
return bytes(string) + (length - len(string)) * char
elif multiple:
if len(string) % multiple == 0:
return string
return bytes(string) + (multiple - (len(string) % multiple)) * char
else:
raise Exception('Nothing to pad string to')
def align_to(nb, alignment):
return (nb + (alignment - 1)) & ~(alignment - 1)
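# Rounds up to the next multiple of a power-of-two alignment,
# e.g. align_to(0x123, 0x200) == 0x200.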
def bytes2human(n, f='%(value).1f %(symbol)s'):
n = int(n)
assert n >= 0, 'Can\'t convert negative input to byte units'
symbols = ('B', 'KB', 'MB', 'GB', 'TB')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 10**(3 * i) * 1000
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return f % locals()
return f % dict(symbol=symbols[0], value=n)
class FileInContainer(io.BufferedReader):
'''Hacky class to redirect read operations to the container, instead of loading everything in memory'''
def __init__(self, fp, offset_in_container, size):
super(FileInContainer, self).__init__(fp)
self.f = fp
self.offset_in_cont = offset_in_container
self.size = size
        self.seek(0) # seek to the start of the contained region (the base class doesn't)
def seek(self, offset, whence=0):
if whence == 0:
self._pos = offset
self.f.seek(self.offset_in_cont + offset)
        elif whence == 1:
            self._pos += offset
            self.f.seek(self.offset_in_cont + self._pos) # _pos already includes offset
elif whence == 2:
if offset > 0:
offset *= -1
self._pos = self.size + offset
self.f.seek(self.offset_in_cont + self.size + offset, whence)
def tell(self):
return self._pos
def read(self, size=None):
if self.offset_in_cont + self._pos != self.f.tell():
self.seek(self._pos)
if self.tell() > self.size:
data = b''
elif size is None or size + self.tell() > self.size:
data = self.f.read(self.size - self.tell())
self._pos = self.size
else:
data = self.f.read(size)
            self._pos += len(data) # use the actual count in case of a short read
return data
def write(self, data):
raise NotImplementedError
def close(self):
# File of container is closed along with this object
return
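# Illustrative usage sketch (not from the original tool; data, offsets, and
# sizes are invented): slice a region of a container and read it through the
# normal file API, then pretty-print values with the helpers above.
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryFile() as container:
        container.write(b'HEADERpayload-bytesFOOTER')
        sub = FileInContainer(container, offset_in_container=6, size=13)
        assert sub.read() == b'payload-bytes'
    print(bytes2human(123456789)) # '123.5 MB'
    print(memdump(b'\x01\x02\x03\x04\x05', length=4, message='data: '))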
|
83a2aad12a265c68f589b5be3747126378158587
|
7f523c407d45d116860eff67f079e807f2b53339
|
/src/third_party/beaengine/tests/0f385c.py
|
3064594dccf5efac884203b1b4e55d702ee31e52
|
[
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
0vercl0k/rp
|
a352c96bfe3715eb9ce8c5942831123e65289dac
|
b24e7f58a594aaf0ce3771745bf06862f6ecc074
|
refs/heads/master
| 2023-08-30T08:03:14.842828
| 2023-08-09T00:41:00
| 2023-08-09T00:41:00
| 3,554,173
| 1,557
| 239
|
MIT
| 2023-08-09T00:41:02
| 2012-02-26T19:26:33
|
C++
|
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
0f385c.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# VEX.128.F3.0F38.W0 5C 11:rrr:bbb
# TDPBF16PS tmm1, tmm2, tmm3
myVEX = VEX('VEX.128.F3.0F38.W0')
myVEX.vvvv = 0b1111
myVEX.R = 0b1
Buffer = bytes.fromhex('{}5cc0'.format(myVEX.c4()))
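        # ModRM byte 0xc0 is register-direct with reg=rm=0, i.e. tmm0 for both
        # ModRM operands; VEX.vvvv is stored inverted, so 0b1111 also selects
        # register 0. Hence all three operands decode as tmm0 below.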
myDisasm = Disasm(Buffer)
length = myDisasm.read()
assert_equal(myDisasm.length, len(Buffer))
assert_equal(myDisasm.infos.Instruction.Opcode, 0x5c)
assert_equal(myDisasm.infos.Instruction.Category, AMX_INSTRUCTION)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'tdpbf16ps')
assert_equal(myDisasm.infos.Operand1.OpType, REGISTER_TYPE)
assert_equal(myDisasm.infos.Operand1.Registers.tmm, REG0)
assert_equal(myDisasm.infos.Operand1.OpSize, 8192)
assert_equal(myDisasm.infos.Operand1.AccessMode, WRITE)
assert_equal(myDisasm.infos.Operand2.OpType, REGISTER_TYPE)
assert_equal(myDisasm.infos.Operand2.Registers.tmm, REG0)
assert_equal(myDisasm.infos.Operand2.OpSize, 8192)
assert_equal(myDisasm.infos.Operand2.AccessMode, READ)
assert_equal(myDisasm.infos.Operand3.OpType, REGISTER_TYPE)
assert_equal(myDisasm.infos.Operand3.Registers.tmm, REG0)
assert_equal(myDisasm.infos.Operand3.OpSize, 8192)
assert_equal(myDisasm.infos.Operand3.AccessMode, READ)
assert_equal(myDisasm.repr(), 'tdpbf16ps tmm0, tmm0, tmm0')
|
6df7f6203c33399ca084b54c93c23936ae0a3b36
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AnttechBlockchainFinanceMylogisticfinsandboxMessagePublishModel.py
|
647794746f6e40771a5d1c38a236f4f5e15d3d50
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
AnttechBlockchainFinanceMylogisticfinsandboxMessagePublishModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechBlockchainFinanceMylogisticfinsandboxMessagePublishModel(object):
def __init__(self):
self._data = None
self._method_name = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def method_name(self):
return self._method_name
@method_name.setter
def method_name(self, value):
self._method_name = value
def to_alipay_dict(self):
params = dict()
if self.data:
if hasattr(self.data, 'to_alipay_dict'):
params['data'] = self.data.to_alipay_dict()
else:
params['data'] = self.data
if self.method_name:
if hasattr(self.method_name, 'to_alipay_dict'):
params['method_name'] = self.method_name.to_alipay_dict()
else:
params['method_name'] = self.method_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechBlockchainFinanceMylogisticfinsandboxMessagePublishModel()
if 'data' in d:
o.data = d['data']
if 'method_name' in d:
o.method_name = d['method_name']
return o
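# Illustrative round-trip sketch (field values invented for the demo):
if __name__ == '__main__':
    model = AnttechBlockchainFinanceMylogisticfinsandboxMessagePublishModel()
    model.data = '{"waybill": "demo"}'
    model.method_name = 'publish'
    restored = AnttechBlockchainFinanceMylogisticfinsandboxMessagePublishModel.from_alipay_dict(
        model.to_alipay_dict())
    assert restored.method_name == 'publish'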
|
0351f7b4a49a6d6e2e704c2d64c6a8dbdc1851e5
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/zigzag-iterator.py
|
be5a5f605f7cdc07c187237a2b0f1b59b2b83390
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
zigzag-iterator.py
|
# Time: O(n)
# Space: O(k)
import collections
class ZigzagIterator(object):
def __init__(self, v1, v2):
"""
        Initialize the queue with (remaining_count, iterator) pairs for each non-empty list.
:type v1: List[int]
:type v2: List[int]
"""
self.q = collections.deque([(len(v), iter(v)) for v in (v1, v2) if v])
def next(self):
"""
:rtype: int
"""
        remaining, it = self.q.popleft()
        if remaining > 1:
            self.q.append((remaining - 1, it))
        return next(it)
def hasNext(self):
"""
:rtype: bool
"""
return bool(self.q)
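# Illustrative usage sketch (values invented): elements are yielded in zigzag
# order, draining each list's iterator until both are exhausted.
if __name__ == "__main__":
    it = ZigzagIterator([1, 2], [3, 4, 5, 6])
    result = []
    while it.hasNext():
        result.append(it.next())
    assert result == [1, 3, 2, 4, 5, 6]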
|
b4c61a030962d37fbf215d52a5dd2cb737f30ce6
|
0ba2e5061577f6286ff9265ef1df9aca96769445
|
/data_structures/Graphs/graphsearch/a-star-search/python/util/path.py
|
5c55f21b1290a95cef5130b8b3e79726fd4d842c
|
[
"CC0-1.0"
] |
permissive
|
ZoranPandovski/al-go-rithms
|
68d5d02f80a61de9baf8e50a81a52e7d0b3983a0
|
4ae6ba54e90af14af236e03e435eb0402dcac787
|
refs/heads/master
| 2023-09-04T16:04:04.321676
| 2023-06-06T15:22:16
| 2023-06-06T15:22:16
| 93,438,176
| 1,421
| 2,445
|
CC0-1.0
| 2023-06-15T14:24:28
| 2017-06-05T19:20:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
path.py
|
class path:
    def __init__(self, source, destination, cost):
        """
        Describes a path from a node to another node including its cost.
        :param source: from this node
        :param destination: to this node
        :param cost: cost to travel the path
        """
        self.source = source
        self.destination = destination
        self.cost = cost
    def __repr__(self):
        return "path:[source={}, destination={}, cost={}]".format(self.source, self.destination, self.cost)
|
516d74419fa5851971207a4fd109afb332c0ed75
|
71fb04f723b46a1bf45295be239bcec25e07f98c
|
/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone.py
|
c90bebf19846f24ed9f7570504334ee848e96eb2
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-cv
|
9bca4479474e853ec3a1c541b8be20fea2447a1a
|
e83f229f1b7b847cd712d5cd4810097d3e06d14e
|
refs/heads/master
| 2023-08-31T10:22:08.406394
| 2023-08-30T20:24:57
| 2023-08-30T20:24:57
| 265,079,853
| 818
| 287
|
NOASSERTION
| 2023-09-12T16:49:01
| 2020-05-18T22:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 16,262
|
py
|
efficientnet_v1_backbone.py
|
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
@keras.saving.register_keras_serializable(package="keras_cv.models")
class EfficientNetV1Backbone(Backbone):
"""Instantiates the EfficientNetV1 architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
- [Based on the original keras.applications EfficientNet](https://github.com/keras-team/keras/blob/master/keras/applications/efficientnet.py)
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections. The default
value is set to 0.2.
depth_divisor: integer, a unit of network width. The default value is
set to 8.
activation: activation function to use between each convolutional layer.
input_shape: optional shape tuple, it should have exactly 3 input
channels.
        input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`) to
use as image input for the model.
stackwise_kernel_sizes: list of ints, the kernel sizes used for each
conv block.
stackwise_num_repeats: list of ints, number of times to repeat each
conv block.
stackwise_input_filters: list of ints, number of input filters for
each conv block.
stackwise_output_filters: list of ints, number of output filters for
each stack in the conv blocks model.
stackwise_expansion_ratios: list of floats, expand ratio passed to the
squeeze and excitation blocks.
stackwise_strides: list of ints, stackwise_strides for each conv block.
stackwise_squeeze_and_excite_ratios: list of ints, the squeeze and
excite ratios passed to the squeeze and excitation blocks.
Usage:
```python
# Construct an EfficientNetV1 from a preset:
efficientnet = keras_cv.models.EfficientNetV1Backbone.from_preset(
"efficientnetv1_b0"
)
images = np.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
# Alternatively, you can also customize the EfficientNetV1 architecture:
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
images = np.ones((1, 256, 256, 3))
    outputs = model.predict(images)
```
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
width_coefficient,
depth_coefficient,
stackwise_kernel_sizes,
stackwise_num_repeats,
stackwise_input_filters,
stackwise_output_filters,
stackwise_expansion_ratios,
stackwise_strides,
stackwise_squeeze_and_excite_ratios,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
input_shape=(None, None, 3),
input_tensor=None,
activation="swish",
**kwargs,
):
img_input = utils.parse_model_inputs(input_shape, input_tensor)
x = img_input
if include_rescaling:
# Use common rescaling strategy across keras_cv
x = keras.layers.Rescaling(1.0 / 255.0)(x)
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, 3), name="stem_conv_pad"
)(x)
# Build stem
stem_filters = round_filters(
filters=stackwise_input_filters[0],
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=stem_filters,
kernel_size=3,
strides=2,
padding="valid",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name="stem_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name="stem_bn",
)(x)
x = keras.layers.Activation(activation, name="stem_activation")(x)
# Build blocks
block_id = 0
blocks = float(sum(stackwise_num_repeats))
pyramid_level_inputs = []
for i in range(len(stackwise_kernel_sizes)):
num_repeats = stackwise_num_repeats[i]
input_filters = stackwise_input_filters[i]
output_filters = stackwise_output_filters[i]
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(
filters=input_filters,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
output_filters = round_filters(
filters=output_filters,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
repeats = round_repeats(
repeats=num_repeats,
depth_coefficient=depth_coefficient,
)
strides = stackwise_strides[i]
squeeze_and_excite_ratio = stackwise_squeeze_and_excite_ratios[i]
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
strides = 1
input_filters = output_filters
if strides != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# 97 is the start of the lowercase alphabet.
letter_identifier = chr(j + 97)
x = apply_efficientnet_block(
inputs=x,
filters_in=input_filters,
filters_out=output_filters,
kernel_size=stackwise_kernel_sizes[i],
strides=strides,
expand_ratio=stackwise_expansion_ratios[i],
se_ratio=squeeze_and_excite_ratio,
activation=activation,
dropout_rate=drop_connect_rate * block_id / blocks,
name="block{}{}_".format(i + 1, letter_identifier),
)
block_id += 1
# Build top
top_filters = round_filters(
filters=1280,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=top_filters,
kernel_size=1,
padding="same",
strides=1,
kernel_initializer=conv_kernel_initializer(),
use_bias=False,
name="top_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name="top_bn",
)(x)
x = keras.layers.Activation(
activation=activation, name="top_activation"
)(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# Create model.
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.include_rescaling = include_rescaling
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.depth_divisor = depth_divisor
self.activation = activation
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_kernel_sizes = stackwise_kernel_sizes
self.stackwise_num_repeats = stackwise_num_repeats
self.stackwise_input_filters = stackwise_input_filters
self.stackwise_output_filters = stackwise_output_filters
self.stackwise_expansion_ratios = stackwise_expansion_ratios
self.stackwise_strides = stackwise_strides
self.stackwise_squeeze_and_excite_ratios = (
stackwise_squeeze_and_excite_ratios
)
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
"width_coefficient": self.width_coefficient,
"depth_coefficient": self.depth_coefficient,
"dropout_rate": self.dropout_rate,
"drop_connect_rate": self.drop_connect_rate,
"depth_divisor": self.depth_divisor,
"activation": self.activation,
"input_tensor": self.input_tensor,
"input_shape": self.input_shape[1:],
"trainable": self.trainable,
"stackwise_kernel_sizes": self.stackwise_kernel_sizes,
"stackwise_num_repeats": self.stackwise_num_repeats,
"stackwise_input_filters": self.stackwise_input_filters,
"stackwise_output_filters": self.stackwise_output_filters,
"stackwise_expansion_ratios": self.stackwise_expansion_ratios,
"stackwise_strides": self.stackwise_strides,
"stackwise_squeeze_and_excite_ratios": (
self.stackwise_squeeze_and_excite_ratios
),
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
def conv_kernel_initializer(scale=2.0):
return keras.initializers.VarianceScaling(
scale=scale, mode="fan_out", distribution="truncated_normal"
)
def round_filters(filters, width_coefficient, divisor):
"""Round number of filters based on depth multiplier.
Args:
filters: int, number of filters for Conv layer
width_coefficient: float, denotes the scaling coefficient of network
width
divisor: int, a unit of network width
Returns:
int, new rounded filters value for Conv layer
"""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
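# Worked example (values chosen for illustration): round_filters(32, 1.2, 8)
# scales 32 to 38.4, rounds to the nearest multiple of 8 (40), and keeps 40
# because it is not more than 10% below 38.4.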
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier.
Args:
repeats: int, number of repeats of efficientnet block
depth_coefficient: float, denotes the scaling coefficient of network
depth
Returns:
int, rounded repeats
"""
return int(math.ceil(depth_coefficient * repeats))
def apply_efficientnet_block(
inputs,
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
activation="swish",
expand_ratio=1,
se_ratio=0.0,
dropout_rate=0.0,
name="",
):
"""An inverted residual block.
Args:
inputs: Tensor, The input tensor of the block
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
activation: activation function to use between each convolutional layer.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
dropout_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
Returns:
output tensor for the block.
""" # noqa: E501
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = keras.layers.Conv2D(
filters=filters,
kernel_size=1,
strides=1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "expand_conv",
)(inputs)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "expand_bn",
)(x)
x = keras.layers.Activation(
activation, name=name + "expand_activation"
)(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, kernel_size),
name=name + "dwconv_pad",
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=conv_kernel_initializer(),
name=name + "dwconv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "dwconv_bn",
)(x)
x = keras.layers.Activation(activation, name=name + "dwconv_activation")(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = keras.layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
se_shape = (1, 1, filters)
se = keras.layers.Reshape(se_shape, name=name + "se_reshape")(se)
se = keras.layers.Conv2D(
filters_se,
1,
padding="same",
activation=activation,
kernel_initializer=conv_kernel_initializer(),
name=name + "se_reduce",
)(se)
se = keras.layers.Conv2D(
filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=conv_kernel_initializer(),
name=name + "se_expand",
)(se)
x = keras.layers.multiply([x, se], name=name + "se_excite")
# Output phase
x = keras.layers.Conv2D(
filters=filters_out,
kernel_size=1,
strides=1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "project",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "project_bn",
)(x)
x = keras.layers.Activation(activation, name=name + "project_activation")(x)
if strides == 1 and filters_in == filters_out:
if dropout_rate > 0:
x = keras.layers.Dropout(
dropout_rate,
noise_shape=(None, 1, 1, 1),
name=name + "drop",
)(x)
x = keras.layers.Add(name=name + "add")([x, inputs])
return x
|
5f22a1084ea792f0b1b5fa4435811c2d7edd6552
|
4d8df3fd1c531ea93068b56218b6f9aa44fd898e
|
/ops/pebble.py
|
8d107cb8417b423712309f41894a757ebc959c30
|
[
"Apache-2.0"
] |
permissive
|
canonical/operator
|
f7adb02afe5209f1dd711a6b4c25bfaedacb2d02
|
c4e3266a6568ba310064ca8b9bff7adb89676224
|
refs/heads/main
| 2023-09-01T14:07:37.685404
| 2023-08-30T19:55:19
| 2023-08-30T19:55:19
| 212,098,176
| 226
| 109
|
Apache-2.0
| 2023-08-30T19:55:20
| 2019-10-01T13:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 102,576
|
py
|
pebble.py
|
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for the Pebble API (HTTP over Unix socket).
For a command-line interface for local testing, see test/pebble_cli.py.
"""
import binascii
import copy
import datetime
import email.message
import email.parser
import enum
import http.client
import io
import json
import logging
import os
import select
import shutil
import signal
import socket
import sys
import tempfile
import threading
import time
import typing
import urllib.error
import urllib.parse
import urllib.request
import warnings
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
BinaryIO,
Callable,
Dict,
Generator,
Generic,
Iterable,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import websocket # type: ignore
from ops._private import timeconv, yaml
# Public as these are used in the Container.add_layer signature
ServiceDict = typing.TypedDict('ServiceDict',
{'summary': str,
'description': str,
'startup': str,
'override': str,
'command': str,
'after': Sequence[str],
'before': Sequence[str],
'requires': Sequence[str],
'environment': Dict[str, str],
'user': str,
'user-id': Optional[int],
'group': str,
'group-id': Optional[int],
'working-dir': str,
'on-success': str,
'on-failure': str,
'on-check-failure': Dict[str, Any],
'backoff-delay': str,
'backoff-factor': Optional[int],
'backoff-limit': str,
'kill-delay': Optional[str],
},
total=False)
HttpDict = typing.TypedDict('HttpDict',
{'url': str,
'headers': Dict[str, str]},
total=False)
TcpDict = typing.TypedDict('TcpDict',
{'port': int,
'host': str},
total=False)
ExecDict = typing.TypedDict('ExecDict',
{'command': str,
# see JujuVersion.supports_exec_service_context
'service-context': str,
'environment': Dict[str, str],
'user-id': Optional[int],
'user': str,
'group-id': Optional[int],
'group': str,
'working-dir': str},
total=False)
CheckDict = typing.TypedDict('CheckDict',
{'override': str,
'level': Union['CheckLevel', str],
'period': Optional[str],
'timeout': Optional[str],
'http': Optional[HttpDict],
'tcp': Optional[TcpDict],
'exec': Optional[ExecDict],
'threshold': Optional[int]},
total=False)
LayerDict = typing.TypedDict('LayerDict',
{'summary': str,
'description': str,
'services': Dict[str, ServiceDict],
'checks': Dict[str, CheckDict]},
total=False)
PlanDict = typing.TypedDict('PlanDict',
{'services': Dict[str, ServiceDict],
'checks': Dict[str, CheckDict]},
total=False)
if TYPE_CHECKING:
from typing_extensions import Literal, Protocol, TypedDict
# callback types for _MultiParser header and body handlers
class _BodyHandler(Protocol):
def __call__(self, data: bytes, done: bool = False) -> None: ... # noqa
_HeaderHandler = Callable[[bytes], None]
# tempfile.NamedTemporaryFile has an odd interface because of that
# 'name' attribute, so we need to make a Protocol for it.
class _Tempfile(Protocol):
name = ''
def write(self, data: bytes): ... # noqa
def close(self): ... # noqa
class _FileLikeIO(Protocol[typing.AnyStr]): # That also covers TextIO and BytesIO
def read(self, __n: int = ...) -> typing.AnyStr: ... # for BinaryIO # noqa
def write(self, __s: typing.AnyStr) -> int: ... # noqa
def __enter__(self) -> typing.IO[typing.AnyStr]: ... # noqa
_AnyStrFileLikeIO = Union[_FileLikeIO[bytes], _FileLikeIO[str]]
_TextOrBinaryIO = Union[TextIO, BinaryIO]
_IOSource = Union[str, bytes, _AnyStrFileLikeIO]
_SystemInfoDict = TypedDict('_SystemInfoDict', {'version': str})
_CheckInfoDict = TypedDict('_CheckInfoDict',
{"name": str,
"level": Optional[Union['CheckLevel', str]],
"status": Union['CheckStatus', str],
"failures": int,
"threshold": int})
_FileInfoDict = TypedDict('_FileInfoDict',
{"path": str,
"name": str,
"size": Optional[int],
"permissions": str,
"last-modified": str,
"user-id": Optional[int],
"user": Optional[str],
"group-id": Optional[int],
"group": Optional[str],
"type": Union['FileType', str]})
_AuthDict = TypedDict('_AuthDict',
{'permissions': Optional[str],
'user-id': Optional[int],
'user': Optional[str],
'group-id': Optional[int],
'group': Optional[str],
'path': Optional[str],
'make-dirs': Optional[bool],
'make-parents': Optional[bool],
}, total=False)
_ServiceInfoDict = TypedDict('_ServiceInfoDict',
{'startup': Union['ServiceStartup', str],
'current': Union['ServiceStatus', str],
'name': str})
_ProgressDict = TypedDict('_ProgressDict',
{'label': str,
'done': int,
'total': int})
_TaskDict = TypedDict('_TaskDict',
{'id': 'TaskID',
'kind': str,
'summary': str,
'status': str,
'log': Optional[List[str]],
'progress': _ProgressDict,
'spawn-time': str,
'ready-time': str,
'data': Optional[Dict[str, Any]]})
_ChangeDict = TypedDict('_ChangeDict',
{'id': str,
'kind': str,
'summary': str,
'status': str,
'ready': bool,
'spawn-time': str,
'tasks': Optional[List[_TaskDict]],
'err': Optional[str],
'ready-time': Optional[str],
'data': Optional[Dict[str, Any]]})
_Error = TypedDict('_Error',
{'kind': str,
'message': str})
_Item = TypedDict('_Item',
{'path': str,
'error': _Error})
_FilesResponse = TypedDict('_FilesResponse',
{'result': List[_Item]})
_WarningDict = TypedDict('_WarningDict',
{'message': str,
'first-added': str,
'last-added': str,
'last-shown': Optional[str],
'expire-after': str,
'repeat-after': str})
class _WebSocket(Protocol):
def connect(self, url: str, socket: socket.socket): ... # noqa
def shutdown(self): ... # noqa
def send(self, payload: str): ... # noqa
def send_binary(self, payload: bytes): ... # noqa
def recv(self) -> Union[str, bytes]: ... # noqa
logger = logging.getLogger(__name__)
class _NotProvidedFlag:
pass
_not_provided = _NotProvidedFlag()
class _UnixSocketConnection(http.client.HTTPConnection):
"""Implementation of HTTPConnection that connects to a named Unix socket."""
def __init__(self, host: str, socket_path: str,
timeout: Union[_NotProvidedFlag, float] = _not_provided):
if timeout is _not_provided:
super().__init__(host)
else:
assert isinstance(timeout, (int, float)), timeout # type guard for pyright
super().__init__(host, timeout=timeout)
self.socket_path = socket_path
def connect(self):
"""Override connect to use Unix socket (instead of TCP socket)."""
if not hasattr(socket, 'AF_UNIX'):
raise NotImplementedError(f'Unix sockets not supported on {sys.platform}')
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.socket_path)
if self.timeout is not _not_provided:
self.sock.settimeout(self.timeout)
class _UnixSocketHandler(urllib.request.AbstractHTTPHandler):
"""Implementation of HTTPHandler that uses a named Unix socket."""
def __init__(self, socket_path: str):
super().__init__()
self.socket_path = socket_path
def http_open(self, req: urllib.request.Request):
"""Override http_open to use a Unix socket connection (instead of TCP)."""
return self.do_open(_UnixSocketConnection, req, # type:ignore
socket_path=self.socket_path)
def _format_timeout(timeout: float) -> str:
"""Format timeout for use in the Pebble API.
The format is in seconds with a millisecond resolution and an 's' suffix,
as accepted by the Pebble API (which uses Go's time.ParseDuration).
"""
return f'{timeout:.3f}s'
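# For example, _format_timeout(1.5) returns '1.500s'.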
def _start_thread(target: Callable[..., Any], *args: Any, **kwargs: Any) -> threading.Thread:
"""Helper to simplify starting a thread."""
thread = threading.Thread(target=target, args=args, kwargs=kwargs)
thread.start()
return thread
class Error(Exception):
"""Base class of most errors raised by the Pebble client."""
def __repr__(self):
return f'<{type(self).__module__}.{type(self).__name__} {self.args}>'
class TimeoutError(TimeoutError, Error):
"""Raised when a polling timeout occurs."""
class ConnectionError(Error):
"""Raised when the Pebble client can't connect to the socket."""
class ProtocolError(Error):
"""Raised when there's a higher-level protocol error talking to Pebble."""
class PathError(Error):
"""Raised when there's an error with a specific path."""
kind: typing.Literal["not-found", "permission-denied", "generic-file-error"]
"""Short string representing the kind of error."""
message: str
"""Human-readable error message from the API."""
def __init__(self, kind: str, message: str):
"""This shouldn't be instantiated directly."""
self.kind = kind # type: ignore
self.message = message
def __str__(self):
return f'{self.kind} - {self.message}'
def __repr__(self):
return f'PathError({self.kind!r}, {self.message!r})'
class APIError(Error):
"""Raised when an HTTP API error occurs talking to the Pebble server."""
body: Dict[str, Any]
"""Body of the HTTP response, parsed as JSON."""
code: int
"""HTTP status code."""
status: str
"""HTTP status string (reason)."""
message: str
"""Human-readable error message from the API."""
def __init__(self, body: Dict[str, Any], code: int, status: str, message: str):
"""This shouldn't be instantiated directly."""
super().__init__(message) # Makes str(e) return message
self.body = body
self.code = code
self.status = status
self.message = message
def __repr__(self):
return f'APIError({self.body!r}, {self.code!r}, {self.status!r}, {self.message!r})'
class ChangeError(Error):
"""Raised by actions when a change is ready but has an error."""
err: str
"""Human-readable error message."""
change: 'Change'
"""Change object associated with this error."""
def __init__(self, err: str, change: 'Change'):
"""This shouldn't be instantiated directly."""
self.err = err
self.change = change
def __str__(self):
parts = [self.err]
# Append any task logs to the error message
for i, task in enumerate(self.change.tasks):
if not task.log:
continue
parts.append(f'\n----- Logs from task {i} -----\n')
parts.append('\n'.join(task.log))
if len(parts) > 1:
parts.append('\n-----')
return ''.join(parts)
def __repr__(self):
return f'ChangeError({self.err!r}, {self.change!r})'
class ExecError(Error, Generic[AnyStr]):
"""Raised when a :meth:`Client.exec` command returns a non-zero exit code."""
STR_MAX_OUTPUT = 1024
"""Maximum number of characters that stdout/stderr are truncated to in ``__str__``."""
command: List[str]
"""Command line of command being executed."""
exit_code: int
"""The process's exit code. Because this is an error, this will always be non-zero."""
stdout: Optional[AnyStr]
"""Standard output from the process.
If :meth:`ExecProcess.wait_output` was being called, this is the captured
stdout as a str (or bytes if encoding was None). If :meth:`ExecProcess.wait`
was being called, this is None.
"""
stderr: Optional[AnyStr]
"""Standard error from the process.
If :meth:`ExecProcess.wait_output` was being called and ``combine_stderr``
was False, this is the captured stderr as a str (or bytes if encoding was
None). If :meth:`ExecProcess.wait` was being called or ``combine_stderr``
was True, this is None.
"""
def __init__(
self,
command: List[str],
exit_code: int,
stdout: Optional[AnyStr],
stderr: Optional[AnyStr],
):
self.command = command
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
def __str__(self):
message = f'non-zero exit code {self.exit_code} executing {self.command!r}'
for name, out in [('stdout', self.stdout), ('stderr', self.stderr)]:
if out is None:
continue
truncated = ' [truncated]' if len(out) > self.STR_MAX_OUTPUT else ''
out = out[:self.STR_MAX_OUTPUT]
message = f'{message}, {name}={out!r}{truncated}'
return message
class WarningState(enum.Enum):
"""Enum of states for get_warnings() select parameter."""
ALL = 'all'
PENDING = 'pending'
class ChangeState(enum.Enum):
"""Enum of states for get_changes() select parameter."""
ALL = 'all'
IN_PROGRESS = 'in-progress'
READY = 'ready'
class SystemInfo:
"""System information object."""
def __init__(self, version: str):
self.version = version
@classmethod
def from_dict(cls, d: '_SystemInfoDict') -> 'SystemInfo':
"""Create new SystemInfo object from dict parsed from JSON."""
return cls(version=d['version'])
def __repr__(self):
return f'SystemInfo(version={self.version!r})'
class Warning:
"""Warning object."""
def __init__(
self,
message: str,
first_added: datetime.datetime,
last_added: datetime.datetime,
last_shown: Optional[datetime.datetime],
expire_after: str,
repeat_after: str,
):
self.message = message
self.first_added = first_added
self.last_added = last_added
self.last_shown = last_shown
self.expire_after = expire_after
self.repeat_after = repeat_after
@classmethod
def from_dict(cls, d: '_WarningDict') -> 'Warning':
"""Create new Warning object from dict parsed from JSON."""
return cls(
message=d['message'],
first_added=timeconv.parse_rfc3339(d['first-added']),
last_added=timeconv.parse_rfc3339(d['last-added']),
last_shown=(timeconv.parse_rfc3339(d['last-shown']) # type: ignore
if d.get('last-shown') else None),
expire_after=d['expire-after'],
repeat_after=d['repeat-after'],
)
def __repr__(self):
return ('Warning('
'message={self.message!r}, '
'first_added={self.first_added!r}, '
'last_added={self.last_added!r}, '
'last_shown={self.last_shown!r}, '
'expire_after={self.expire_after!r}, '
'repeat_after={self.repeat_after!r})'
).format(self=self)
class TaskProgress:
"""Task progress object."""
def __init__(
self,
label: str,
done: int,
total: int,
):
self.label = label
self.done = done
self.total = total
@classmethod
def from_dict(cls, d: '_ProgressDict') -> 'TaskProgress':
"""Create new TaskProgress object from dict parsed from JSON."""
return cls(
label=d['label'],
done=d['done'],
total=d['total'],
)
def __repr__(self):
return ('TaskProgress('
'label={self.label!r}, '
'done={self.done!r}, '
'total={self.total!r})'
).format(self=self)
class TaskID(str):
"""Task ID (a more strongly-typed string)."""
def __repr__(self):
return f'TaskID({str(self)!r})'
class Task:
"""Task object."""
def __init__(
self,
id: TaskID,
kind: str,
summary: str,
status: str,
log: List[str],
progress: TaskProgress,
spawn_time: datetime.datetime,
ready_time: Optional[datetime.datetime],
data: Optional[Dict[str, Any]] = None,
):
self.id = id
self.kind = kind
self.summary = summary
self.status = status
self.log = log
self.progress = progress
self.spawn_time = spawn_time
self.ready_time = ready_time
self.data = data or {}
@classmethod
def from_dict(cls, d: '_TaskDict') -> 'Task':
"""Create new Task object from dict parsed from JSON."""
return cls(
id=TaskID(d['id']),
kind=d['kind'],
summary=d['summary'],
status=d['status'],
log=d.get('log') or [],
progress=TaskProgress.from_dict(d['progress']),
spawn_time=timeconv.parse_rfc3339(d['spawn-time']),
ready_time=(timeconv.parse_rfc3339(d['ready-time'])
if d.get('ready-time') else None),
data=d.get('data') or {},
)
def __repr__(self):
return ('Task('
'id={self.id!r}, '
'kind={self.kind!r}, '
'summary={self.summary!r}, '
'status={self.status!r}, '
'log={self.log!r}, '
'progress={self.progress!r}, '
'spawn_time={self.spawn_time!r}, '
'ready_time={self.ready_time!r}, '
'data={self.data!r})'
).format(self=self)
class ChangeID(str):
"""Change ID (a more strongly-typed string)."""
def __repr__(self):
return f'ChangeID({str(self)!r})'
class Change:
"""Change object."""
def __init__(
self,
id: ChangeID,
kind: str,
summary: str,
status: str,
tasks: List[Task],
ready: bool,
err: Optional[str],
spawn_time: datetime.datetime,
ready_time: Optional[datetime.datetime],
data: Optional[Dict[str, Any]] = None,
):
self.id = id
self.kind = kind
self.summary = summary
self.status = status
self.tasks = tasks
self.ready = ready
self.err = err
self.spawn_time = spawn_time
self.ready_time = ready_time
self.data = data or {}
@classmethod
def from_dict(cls, d: '_ChangeDict') -> 'Change':
"""Create new Change object from dict parsed from JSON."""
return cls(
id=ChangeID(d['id']),
kind=d['kind'],
summary=d['summary'],
status=d['status'],
tasks=[Task.from_dict(t) for t in d.get('tasks') or []],
ready=d['ready'],
err=d.get('err'),
spawn_time=timeconv.parse_rfc3339(d['spawn-time']),
ready_time=(timeconv.parse_rfc3339(d['ready-time']) # type: ignore
if d.get('ready-time') else None),
data=d.get('data') or {},
)
def __repr__(self):
return ('Change('
'id={self.id!r}, '
'kind={self.kind!r}, '
'summary={self.summary!r}, '
'status={self.status!r}, '
'tasks={self.tasks!r}, '
'ready={self.ready!r}, '
'err={self.err!r}, '
'spawn_time={self.spawn_time!r}, '
'ready_time={self.ready_time!r}, '
'data={self.data!r})'
).format(self=self)
class Plan:
"""Represents the effective Pebble configuration.
A plan is the combined layer configuration. The layer configuration is
documented at https://github.com/canonical/pebble/#layer-specification.
"""
def __init__(self, raw: str):
d = yaml.safe_load(raw) or {} # type: ignore
d = typing.cast('PlanDict', d)
self._raw = raw
self._services: Dict[str, Service] = {name: Service(name, service)
for name, service in d.get('services', {}).items()}
self._checks: Dict[str, Check] = {name: Check(name, check)
for name, check in d.get('checks', {}).items()}
@property
def services(self) -> Dict[str, 'Service']:
"""This plan's services mapping (maps service name to Service).
This property is currently read-only.
"""
return self._services
@property
def checks(self) -> Dict[str, 'Check']:
"""This plan's checks mapping (maps check name to :class:`Check`).
This property is currently read-only.
"""
return self._checks
def to_dict(self) -> 'PlanDict':
"""Convert this plan to its dict representation."""
fields = [
('services', {name: service.to_dict() for name, service in self._services.items()}),
('checks', {name: check.to_dict() for name, check in self._checks.items()}),
]
dct = {name: value for name, value in fields if value}
return typing.cast('PlanDict', dct)
def to_yaml(self) -> str:
"""Return this plan's YAML representation."""
return yaml.safe_dump(self.to_dict())
__str__ = to_yaml
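# Illustrative example (YAML invented for this sketch):
# Plan('services:\n  srv1:\n    command: /bin/cmd\n').services['srv1'].command
# evaluates to '/bin/cmd'.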
class Layer:
"""Represents a Pebble configuration layer.
The format of this is documented at
https://github.com/canonical/pebble/#layer-specification.
"""
#: Summary of the purpose of this layer.
summary: str
#: Long-form description of this layer.
description: str
#: Mapping of name to :class:`Service` defined by this layer.
services: Dict[str, 'Service']
#: Mapping of check to :class:`Check` defined by this layer.
checks: Dict[str, 'Check']
def __init__(self, raw: Optional[Union[str, 'LayerDict']] = None):
if isinstance(raw, str):
d = yaml.safe_load(raw) or {} # type: ignore # (Any 'raw' type)
else:
d = raw or {}
d = typing.cast('LayerDict', d)
self.summary = d.get('summary', '')
self.description = d.get('description', '')
self.services = {name: Service(name, service)
for name, service in d.get('services', {}).items()}
self.checks = {name: Check(name, check)
for name, check in d.get('checks', {}).items()}
def to_yaml(self) -> str:
"""Convert this layer to its YAML representation."""
return yaml.safe_dump(self.to_dict())
def to_dict(self) -> 'LayerDict':
"""Convert this layer to its dict representation."""
fields = [
('summary', self.summary),
('description', self.description),
('services', {name: service.to_dict() for name, service in self.services.items()}),
('checks', {name: check.to_dict() for name, check in self.checks.items()}),
]
dct = {name: value for name, value in fields if value}
return typing.cast('LayerDict', dct)
def __repr__(self) -> str:
return f'Layer({self.to_dict()!r})'
def __eq__(self, other: Union['LayerDict', 'Layer']) -> bool:
"""Reports whether this layer configuration is equal to another."""
if isinstance(other, dict):
return self.to_dict() == other
elif isinstance(other, Layer):
return self.to_dict() == other.to_dict()
else:
return NotImplemented
__str__ = to_yaml
class Service:
"""Represents a service description in a Pebble configuration layer."""
def __init__(self, name: str, raw: Optional['ServiceDict'] = None):
self.name = name
dct: ServiceDict = raw or {}
self.summary = dct.get('summary', '')
self.description = dct.get('description', '')
self.startup = dct.get('startup', '')
self.override = dct.get('override', '')
self.command = dct.get('command', '')
self.after = list(dct.get('after', []))
self.before = list(dct.get('before', []))
self.requires = list(dct.get('requires', []))
self.environment = dict(dct.get('environment', {}))
self.user = dct.get('user', '')
self.user_id = dct.get('user-id')
self.group = dct.get('group', '')
self.group_id = dct.get('group-id')
self.working_dir = dct.get('working-dir', '')
self.on_success = dct.get('on-success', '')
self.on_failure = dct.get('on-failure', '')
self.on_check_failure = dict(dct.get('on-check-failure', {}))
self.backoff_delay = dct.get('backoff-delay', '')
self.backoff_factor = dct.get('backoff-factor')
self.backoff_limit = dct.get('backoff-limit', '')
self.kill_delay = dct.get('kill-delay', '')
def to_dict(self) -> 'ServiceDict':
"""Convert this service object to its dict representation."""
fields = [
('summary', self.summary),
('description', self.description),
('startup', self.startup),
('override', self.override),
('command', self.command),
('after', self.after),
('before', self.before),
('requires', self.requires),
('environment', self.environment),
('user', self.user),
('user-id', self.user_id),
('group', self.group),
('group-id', self.group_id),
('working-dir', self.working_dir),
('on-success', self.on_success),
('on-failure', self.on_failure),
('on-check-failure', self.on_check_failure),
('backoff-delay', self.backoff_delay),
('backoff-factor', self.backoff_factor),
('backoff-limit', self.backoff_limit),
('kill-delay', self.kill_delay),
]
dct = {name: value for name, value in fields if value}
return typing.cast('ServiceDict', dct)
def _merge(self, other: 'Service'):
"""Merges this service object with another service definition.
For attributes present in both objects, the passed in service
attributes take precedence.
"""
for name, value in other.__dict__.items():
if not value or name == 'name':
continue
if name in ['after', 'before', 'requires']:
getattr(self, name).extend(value)
elif name in ['environment', 'on_check_failure']:
getattr(self, name).update(value)
else:
setattr(self, name, value)
def __repr__(self) -> str:
return f'Service({self.to_dict()!r})'
def __eq__(self, other: Union['ServiceDict', 'Service']) -> bool:
"""Reports whether this service configuration is equal to another."""
if isinstance(other, dict):
return self.to_dict() == other
elif isinstance(other, Service):
return self.to_dict() == other.to_dict()
else:
return NotImplemented
class ServiceStartup(enum.Enum):
"""Enum of service startup options."""
ENABLED = 'enabled'
DISABLED = 'disabled'
class ServiceStatus(enum.Enum):
"""Enum of service statuses."""
ACTIVE = 'active'
INACTIVE = 'inactive'
ERROR = 'error'
class ServiceInfo:
"""Service status information."""
def __init__(
self,
name: str,
startup: Union[ServiceStartup, str],
current: Union[ServiceStatus, str],
):
self.name = name
self.startup = startup
self.current = current
def is_running(self) -> bool:
"""Return True if this service is running (in the active state)."""
return self.current == ServiceStatus.ACTIVE
@classmethod
def from_dict(cls, d: '_ServiceInfoDict') -> 'ServiceInfo':
"""Create new ServiceInfo object from dict parsed from JSON."""
try:
startup = ServiceStartup(d['startup'])
except ValueError:
startup = d['startup']
try:
current = ServiceStatus(d['current'])
except ValueError:
current = d['current']
return cls(
name=d['name'],
startup=startup,
current=current,
)
def __repr__(self):
return ('ServiceInfo('
'name={self.name!r}, '
'startup={self.startup}, '
'current={self.current})'
).format(self=self)
class Check:
"""Represents a check in a Pebble configuration layer."""
def __init__(self, name: str, raw: Optional['CheckDict'] = None):
self.name = name
dct: CheckDict = raw or {}
self.override: str = dct.get('override', '')
try:
level: Union[CheckLevel, str] = CheckLevel(dct.get('level', ''))
except ValueError:
level = dct.get('level', '')
self.level = level
self.period: Optional[str] = dct.get('period', '')
self.timeout: Optional[str] = dct.get('timeout', '')
self.threshold: Optional[int] = dct.get('threshold')
http = dct.get('http')
if http is not None:
http = copy.deepcopy(http)
self.http: Optional[HttpDict] = http
tcp = dct.get('tcp')
if tcp is not None:
tcp = copy.deepcopy(tcp)
self.tcp: Optional[TcpDict] = tcp
exec_ = dct.get('exec')
if exec_ is not None:
exec_ = copy.deepcopy(exec_)
self.exec: Optional[ExecDict] = exec_
def to_dict(self) -> 'CheckDict':
"""Convert this check object to its dict representation."""
level: str = self.level.value if isinstance(self.level, CheckLevel) else self.level
fields = [
('override', self.override),
('level', level),
('period', self.period),
('timeout', self.timeout),
('threshold', self.threshold),
('http', self.http),
('tcp', self.tcp),
('exec', self.exec),
]
dct = {name: value for name, value in fields if value}
return typing.cast('CheckDict', dct)
def __repr__(self) -> str:
return f'Check({self.to_dict()!r})'
def __eq__(self, other: Union['CheckDict', 'Check']) -> bool:
"""Reports whether this check configuration is equal to another."""
if isinstance(other, dict):
return self.to_dict() == other
elif isinstance(other, Check):
return self.to_dict() == other.to_dict()
else:
return NotImplemented
class CheckLevel(enum.Enum):
"""Enum of check levels."""
UNSET = ''
ALIVE = 'alive'
READY = 'ready'
class CheckStatus(enum.Enum):
"""Enum of check statuses."""
UP = 'up'
DOWN = 'down'
class FileType(enum.Enum):
"""Enum of file types."""
FILE = 'file'
DIRECTORY = 'directory'
SYMLINK = 'symlink'
SOCKET = 'socket'
NAMED_PIPE = 'named-pipe'
DEVICE = 'device'
UNKNOWN = 'unknown'
class FileInfo:
"""Stat-like information about a single file or directory."""
path: str
"""Full path of the file."""
name: str
"""Base name of the file."""
type: Union['FileType', str]
"""Type of the file ("file", "directory", "symlink", etc)."""
size: Optional[int]
"""Size of the file (will be 0 if ``type`` is not "file")."""
permissions: int
"""Unix permissions of the file."""
last_modified: datetime.datetime
"""Time file was last modified."""
user_id: Optional[int]
"""User ID of the file."""
user: Optional[str]
"""Username of the file."""
group_id: Optional[int]
"""Group ID of the file."""
group: Optional[str]
"""Group name of the file."""
def __init__(
self,
path: str,
name: str,
type: Union['FileType', str],
size: Optional[int],
permissions: int,
last_modified: datetime.datetime,
user_id: Optional[int],
user: Optional[str],
group_id: Optional[int],
group: Optional[str],
):
self.path = path
self.name = name
self.type = type
self.size = size
self.permissions = permissions
self.last_modified = last_modified
self.user_id = user_id
self.user = user
self.group_id = group_id
self.group = group
@classmethod
def from_dict(cls, d: '_FileInfoDict') -> 'FileInfo':
"""Create new FileInfo object from dict parsed from JSON."""
try:
file_type = FileType(d['type'])
except ValueError:
file_type = d['type']
return cls(
path=d['path'],
name=d['name'],
type=file_type,
size=d.get('size'),
permissions=int(d['permissions'], 8),
last_modified=timeconv.parse_rfc3339(d['last-modified']),
user_id=d.get('user-id'),
user=d.get('user'),
group_id=d.get('group-id'),
group=d.get('group'),
)
def __repr__(self):
return ('FileInfo('
'path={self.path!r}, '
'name={self.name!r}, '
'type={self.type}, '
'size={self.size}, '
'permissions=0o{self.permissions:o}, '
'last_modified={self.last_modified!r}, '
'user_id={self.user_id}, '
'user={self.user!r}, '
'group_id={self.group_id}, '
'group={self.group!r})'
).format(self=self)
class CheckInfo:
"""Check status information.
A list of these objects is returned from :meth:`Client.get_checks`.
"""
name: str
"""Name of the check."""
level: Optional[Union[CheckLevel, str]]
"""Check level.
This can be :attr:`CheckLevel.ALIVE`, :attr:`CheckLevel.READY`, or None (level not set).
"""
status: Union[CheckStatus, str]
"""Status of the check.
:attr:`CheckStatus.UP` means the check is healthy (the number of failures
is less than the threshold), :attr:`CheckStatus.DOWN` means the check is
unhealthy (the number of failures has reached the threshold).
"""
failures: int
"""Number of failures since the check last succeeded.
This is reset to zero if the check succeeds.
"""
threshold: int
"""Failure threshold.
    This is how many consecutive failures it takes for the check to be considered "down".
"""
def __init__(
self,
name: str,
level: Optional[Union[CheckLevel, str]],
status: Union[CheckStatus, str],
failures: int = 0,
threshold: int = 0,
):
self.name = name
self.level = level
self.status = status
self.failures = failures
self.threshold = threshold
@classmethod
def from_dict(cls, d: '_CheckInfoDict') -> 'CheckInfo':
"""Create new :class:`CheckInfo` object from dict parsed from JSON."""
try:
level = CheckLevel(d.get('level', ''))
except ValueError:
level = d.get('level')
try:
status = CheckStatus(d['status'])
except ValueError:
status = d['status']
return cls(
name=d['name'],
level=level,
status=status,
failures=d.get('failures', 0),
threshold=d['threshold'],
)
def __repr__(self):
return ('CheckInfo('
'name={self.name!r}, '
'level={self.level!r}, '
'status={self.status}, '
'failures={self.failures}, '
'threshold={self.threshold!r})'
).format(self=self)
class ExecProcess(Generic[AnyStr]):
"""Represents a process started by :meth:`Client.exec`.
To avoid deadlocks, most users should use :meth:`wait_output` instead of
reading and writing the :attr:`stdin`, :attr:`stdout`, and :attr:`stderr`
attributes directly. Alternatively, users can pass stdin/stdout/stderr to
:meth:`Client.exec`.
This class should not be instantiated directly, only via
:meth:`Client.exec`.
"""
stdin: Optional[IO[AnyStr]]
"""Standard input for the process.
If the stdin argument was not passed to :meth:`Client.exec`, this is a
writable file-like object the caller can use to stream input to the
process. It is None if stdin was passed to :meth:`Client.exec`.
"""
stdout: Optional[IO[AnyStr]]
"""Standard output from the process.
If the stdout argument was not passed to :meth:`Client.exec`, this is a
readable file-like object the caller can use to stream output from the
process. It is None if stdout was passed to :meth:`Client.exec`.
"""
stderr: Optional[IO[AnyStr]]
"""Standard error from the process.
If the stderr argument was not passed to :meth:`Client.exec` and
``combine_stderr`` was False, this is a readable file-like object the
caller can use to stream error output from the process. It is None if
stderr was passed to :meth:`Client.exec` or ``combine_stderr`` was True.
"""
def __init__(
self,
stdin: Optional[IO[AnyStr]],
stdout: Optional[IO[AnyStr]],
stderr: Optional[IO[AnyStr]],
client: 'Client',
timeout: Optional[float],
control_ws: '_WebSocket',
stdio_ws: '_WebSocket',
stderr_ws: Optional['_WebSocket'],
command: List[str],
encoding: Optional[str],
change_id: ChangeID,
cancel_stdin: Optional[Callable[[], None]],
cancel_reader: Optional[int],
threads: List[threading.Thread],
):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self._client = client
self._timeout = timeout
self._control_ws = control_ws
self._stdio_ws = stdio_ws
self._stderr_ws = stderr_ws
self._command = command
self._encoding = encoding
self._change_id = change_id
self._cancel_stdin = cancel_stdin
self._cancel_reader = cancel_reader
self._threads = threads
self._waited = False
def __del__(self):
if not self._waited:
msg = 'ExecProcess instance garbage collected without call to wait() or wait_output()'
warnings.warn(msg, ResourceWarning)
def wait(self):
"""Wait for the process to finish.
If a timeout was specified to the :meth:`Client.exec` call, this waits
at most that duration.
Raises:
ChangeError: if there was an error starting or running the process.
ExecError: if the process exits with a non-zero exit code.
"""
exit_code = self._wait()
if exit_code != 0:
raise ExecError(self._command, exit_code, None, None)
def _wait(self) -> int:
self._waited = True
timeout = self._timeout
if timeout is not None:
# A bit more than the command timeout to ensure that happens first
timeout += 1
change = self._client.wait_change(self._change_id, timeout=timeout)
# If stdin reader thread is running, stop it
if self._cancel_stdin is not None:
self._cancel_stdin()
# Wait for all threads to finish (e.g., message barrier sent)
for thread in self._threads:
thread.join()
# If we opened a cancel_reader pipe, close the read side now (write
        # side was already closed by _cancel_stdin()).
if self._cancel_reader is not None:
os.close(self._cancel_reader)
# Close websockets (shutdown doesn't send CLOSE message or wait for response).
self._control_ws.shutdown()
self._stdio_ws.shutdown()
if self._stderr_ws is not None:
self._stderr_ws.shutdown()
if change.err:
raise ChangeError(change.err, change)
exit_code = -1
if change.tasks:
exit_code = change.tasks[0].data.get('exit-code', -1)
return exit_code
def wait_output(self) -> Tuple[AnyStr, Optional[AnyStr]]:
"""Wait for the process to finish and return tuple of (stdout, stderr).
If a timeout was specified to the :meth:`Client.exec` call, this waits
at most that duration. If combine_stderr was True, stdout will include
the process's standard error, and stderr will be None.
Raises:
ChangeError: if there was an error starting or running the process.
ExecError: if the process exits with a non-zero exit code.
"""
if self.stdout is None:
raise TypeError(
"can't use wait_output() when exec was called with the stdout argument; "
"use wait() instead"
)
if self._encoding is not None:
out = io.StringIO()
err = io.StringIO() if self.stderr is not None else None
else:
out = io.BytesIO()
err = io.BytesIO() if self.stderr is not None else None
t = _start_thread(shutil.copyfileobj, self.stdout, out)
self._threads.append(t)
if self.stderr is not None:
t = _start_thread(shutil.copyfileobj, self.stderr, err)
self._threads.append(t)
exit_code: int = self._wait()
out_value = typing.cast(AnyStr, out.getvalue())
err_value = typing.cast(AnyStr, err.getvalue()) if err is not None else None
if exit_code != 0:
raise ExecError[AnyStr](self._command, exit_code, out_value, err_value)
return (out_value, err_value)
def send_signal(self, sig: Union[int, str]):
"""Send the given signal to the running process.
Args:
sig: Name or number of signal to send, e.g., "SIGHUP", 1, or
signal.SIGHUP.
"""
if isinstance(sig, int):
sig = signal.Signals(sig).name
payload = {
'command': 'signal',
'signal': {'name': sig},
}
msg = json.dumps(payload, sort_keys=True)
self._control_ws.send(msg)
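# --- Illustrative sketch (not part of the original module) ------------------
# Terminating a long-running command started with Client.exec by sending it a
# signal over the control websocket. `client` is assumed to be a connected
# Client instance (see the Client class below).
def _example_terminate_long_command(client: 'Client'):
    process = client.exec(['sleep', '60'])
    process.send_signal('SIGTERM')  # a name, a number, or signal.SIGTERM
    try:
        process.wait()
    except ExecError:
        pass  # sleep exits non-zero after being signalled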
def _has_fileno(f: Any) -> bool:
"""Return True if the file-like object has a valid fileno() method."""
try:
f.fileno()
return True
except Exception:
# Some types define a fileno method that raises io.UnsupportedOperation,
# but just catching all exceptions here won't hurt.
return False
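# Quick illustrative check (not part of the original module): real files have
# a usable fileno(), while in-memory buffers raise io.UnsupportedOperation.
def _example_has_fileno():
    assert not _has_fileno(io.BytesIO(b'data'))
    with open(os.devnull, 'rb') as f:
        assert _has_fileno(f)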
def _reader_to_websocket(reader: '_WebsocketReader',
ws: '_WebSocket',
encoding: str,
cancel_reader: Optional[int] = None,
bufsize: int = 16 * 1024):
"""Read reader through to EOF and send each chunk read to the websocket."""
while True:
if cancel_reader is not None:
# Wait for either a read to be ready or the caller to cancel stdin
result = select.select([cancel_reader, reader], [], [])
if cancel_reader in result[0]:
break
chunk = reader.read(bufsize)
if not chunk:
break
if isinstance(chunk, str):
chunk = chunk.encode(encoding)
ws.send_binary(chunk)
ws.send('{"command":"end"}') # type: ignore # Send "end" command as TEXT frame to signal EOF
def _websocket_to_writer(ws: '_WebSocket', writer: '_WebsocketWriter',
encoding: Optional[str]):
"""Receive messages from websocket (until end signal) and write to writer."""
while True:
chunk = ws.recv()
if isinstance(chunk, str):
try:
payload = json.loads(chunk)
except ValueError:
# Garbage sent, try to keep going
logger.warning('Cannot decode I/O command (invalid JSON)')
continue
command = payload.get('command')
if command != 'end':
# A command we don't recognize, keep going
logger.warning(f'Invalid I/O command {command!r}')
continue
# Received "end" command (EOF signal), stop thread
break
if encoding is not None:
chunk = chunk.decode(encoding)
writer.write(chunk)
class _WebsocketWriter(io.BufferedIOBase):
"""A writable file-like object that sends what's written to it to a websocket."""
def __init__(self, ws: '_WebSocket'):
self.ws = ws
def writable(self):
"""Denote this file-like object as writable."""
return True
def write(self, chunk: Union[str, bytes]) -> int:
"""Write chunk to the websocket."""
if not isinstance(chunk, bytes):
raise TypeError(f'value to write must be bytes, not {type(chunk).__name__}')
self.ws.send_binary(chunk)
return len(chunk)
def close(self):
"""Send end-of-file message to websocket."""
self.ws.send('{"command":"end"}')
class _WebsocketReader(io.BufferedIOBase):
"""A readable file-like object whose reads come from a websocket."""
def __init__(self, ws: '_WebSocket'):
self.ws = ws
self.remaining = b''
self.eof = False
def readable(self) -> bool:
"""Denote this file-like object as readable."""
return True
def read(self, n: int = -1) -> Union[str, bytes]:
"""Read up to n bytes from the websocket (or one message if n<0)."""
if self.eof:
# Calling read() multiple times after EOF should still return EOF
return b''
while not self.remaining:
chunk = self.ws.recv()
if isinstance(chunk, str):
try:
payload = json.loads(chunk)
except ValueError:
# Garbage sent, try to keep going
logger.warning('Cannot decode I/O command (invalid JSON)')
continue
command = payload.get('command')
if command != 'end':
# A command we don't recognize, keep going
logger.warning(f'Invalid I/O command {command!r}')
continue
# Received "end" command, return EOF designator
self.eof = True
return b''
self.remaining = chunk
if n < 0:
n = len(self.remaining)
result: Union[str, bytes] = self.remaining[:n]
self.remaining = self.remaining[n:]
return result
def read1(self, n: int = -1) -> Union[str, bytes]:
"""An alias for read."""
return self.read(n)
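# Minimal illustrative sketch (not part of the original module) of the framing
# these two adapters implement: binary frames carry data, and a TEXT frame
# containing {"command":"end"} signals EOF in either direction. _StubWS is a
# hypothetical stand-in for a real websocket connection.
class _StubWS:
    def __init__(self, incoming=None):
        self.sent = []
        self._incoming = list(incoming or [])

    def send_binary(self, data):
        self.sent.append(data)

    def send(self, text):
        self.sent.append(text)

    def recv(self):
        return self._incoming.pop(0)

def _example_websocket_adapters():
    writer = _WebsocketWriter(_StubWS())
    writer.write(b'hi')
    writer.close()
    assert writer.ws.sent == [b'hi', '{"command":"end"}']

    reader = _WebsocketReader(_StubWS([b'hi', '{"command":"end"}']))
    assert reader.read() == b'hi'
    assert reader.read() == b''  # EOF once the end command arrives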
class Client:
"""Pebble API client.
Defaults to using a Unix socket at socket_path (which must be specified
unless a custom opener is provided).
"""
_chunk_size = 8192
def __init__(self, socket_path: str,
opener: Optional[urllib.request.OpenerDirector] = None,
base_url: str = 'http://localhost',
timeout: float = 5.0):
if not isinstance(socket_path, str):
raise TypeError(f'`socket_path` should be a string, not: {type(socket_path)}')
if opener is None:
opener = self._get_default_opener(socket_path)
self.socket_path = socket_path
self.opener = opener
self.base_url = base_url
self.timeout = timeout
@classmethod
def _get_default_opener(cls, socket_path: str) -> urllib.request.OpenerDirector:
"""Build the default opener to use for requests (HTTP over Unix socket)."""
opener = urllib.request.OpenerDirector()
opener.add_handler(_UnixSocketHandler(socket_path))
opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
opener.add_handler(urllib.request.HTTPRedirectHandler())
opener.add_handler(urllib.request.HTTPErrorProcessor())
return opener
# we need to cast the return type depending on the request params
def _request(self,
method: str,
path: str,
query: Optional[Dict[str, Any]] = None,
body: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Make a JSON request to the Pebble server with the given HTTP method and path.
If query dict is provided, it is encoded and appended as a query string
to the URL. If body dict is provided, it is serialized as JSON and used
as the HTTP body (with Content-Type: "application/json"). The resulting
body is decoded from JSON.
"""
headers = {'Accept': 'application/json'}
data = None
if body is not None:
data = json.dumps(body).encode('utf-8')
headers['Content-Type'] = 'application/json'
response = self._request_raw(method, path, query, headers, data)
self._ensure_content_type(response.headers, 'application/json')
raw_resp: Dict[str, Any] = json.loads(response.read())
return raw_resp
@staticmethod
def _ensure_content_type(headers: email.message.Message,
expected: 'Literal["multipart/form-data", "application/json"]'):
"""Parse Content-Type header from headers and ensure it's equal to expected.
Return a dict of any options in the header, e.g., {'boundary': ...}.
"""
ctype = headers.get_content_type()
params = headers.get_params() or {}
options = {key: value for key, value in params if value}
if ctype != expected:
raise ProtocolError(f'expected Content-Type {expected!r}, got {ctype!r}')
return options
def _request_raw(
self, method: str, path: str,
query: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
data: Optional[Union[bytes, Generator[bytes, Any, Any]]] = None,
) -> http.client.HTTPResponse:
"""Make a request to the Pebble server; return the raw HTTPResponse object."""
url = self.base_url + path
if query:
url = f"{url}?{urllib.parse.urlencode(query, doseq=True)}"
if headers is None:
headers = {}
request = urllib.request.Request(url, method=method, data=data, headers=headers)
try:
response = self.opener.open(request, timeout=self.timeout)
except urllib.error.HTTPError as e:
code = e.code
status = e.reason
try:
body: Dict[str, Any] = json.loads(e.read())
message: str = body['result']['message']
except (OSError, ValueError, KeyError) as e2:
# Will only happen on read error or if Pebble sends invalid JSON.
body: Dict[str, Any] = {}
message = f'{type(e2).__name__} - {e2}'
raise APIError(body, code, status, message)
except urllib.error.URLError as e:
raise ConnectionError(e.reason)
return response
def get_system_info(self) -> SystemInfo:
"""Get system info."""
resp = self._request('GET', '/v1/system-info')
return SystemInfo.from_dict(resp['result'])
def get_warnings(self, select: WarningState = WarningState.PENDING) -> List[Warning]:
"""Get list of warnings in given state (pending or all)."""
query = {'select': select.value}
resp = self._request('GET', '/v1/warnings', query)
return [Warning.from_dict(w) for w in resp['result']]
def ack_warnings(self, timestamp: datetime.datetime) -> int:
"""Acknowledge warnings up to given timestamp, return number acknowledged."""
body = {'action': 'okay', 'timestamp': timestamp.isoformat()}
resp = self._request('POST', '/v1/warnings', body=body)
return resp['result']
def get_changes(
self, select: ChangeState = ChangeState.IN_PROGRESS, service: Optional[str] = None,
) -> List[Change]:
"""Get list of changes in given state, filter by service name if given."""
query: Dict[str, Union[str, int]] = {'select': select.value}
if service is not None:
query['for'] = service
resp = self._request('GET', '/v1/changes', query)
return [Change.from_dict(c) for c in resp['result']]
def get_change(self, change_id: ChangeID) -> Change:
"""Get single change by ID."""
resp = self._request('GET', f'/v1/changes/{change_id}')
return Change.from_dict(resp['result'])
def abort_change(self, change_id: ChangeID) -> Change:
"""Abort change with given ID."""
body = {'action': 'abort'}
resp = self._request('POST', f'/v1/changes/{change_id}', body=body)
return Change.from_dict(resp['result'])
def autostart_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID:
"""Start the startup-enabled services and wait (poll) for them to be started.
Args:
timeout: Seconds before autostart change is considered timed out (float).
delay: Seconds before executing the autostart change (float).
Returns:
ChangeID of the autostart change.
Raises:
ChangeError: if one or more of the services didn't start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('autostart', [], timeout, delay)
def replan_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID:
"""Replan by (re)starting changed and startup-enabled services and wait for them to start.
Args:
timeout: Seconds before replan change is considered timed out (float).
delay: Seconds before executing the replan change (float).
Returns:
ChangeID of the replan change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('replan', [], timeout, delay)
def start_services(
self, services: Iterable[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
"""Start services by name and wait (poll) for them to be started.
Args:
services: Non-empty list of services to start.
timeout: Seconds before start change is considered timed out (float).
delay: Seconds before executing the start change (float).
Returns:
ChangeID of the start change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('start', services, timeout, delay)
def stop_services(
self, services: Iterable[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
"""Stop services by name and wait (poll) for them to be started.
Args:
services: Non-empty list of services to stop.
timeout: Seconds before stop change is considered timed out (float).
delay: Seconds before executing the stop change (float).
Returns:
ChangeID of the stop change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('stop', services, timeout, delay)
def restart_services(
self, services: Iterable[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
"""Restart services by name and wait (poll) for them to be started.
Args:
services: Non-empty list of services to restart.
timeout: Seconds before restart change is considered timed out (float).
delay: Seconds before executing the restart change (float).
Returns:
ChangeID of the restart change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('restart', services, timeout, delay)
def _services_action(
self, action: str, services: Iterable[str], timeout: Optional[float],
delay: float,
) -> ChangeID:
if isinstance(services, (str, bytes)) or not hasattr(services, '__iter__'):
raise TypeError(
f'services must be of type Iterable[str], not {type(services).__name__}')
services = list(services)
for s in services:
if not isinstance(s, str):
raise TypeError(f'service names must be str, not {type(s).__name__}')
body = {'action': action, 'services': services}
resp = self._request('POST', '/v1/services', body=body)
change_id = ChangeID(resp['change'])
if timeout:
change = self.wait_change(change_id, timeout=timeout, delay=delay)
if change.err:
raise ChangeError(change.err, change)
return change_id
def wait_change(
self, change_id: ChangeID,
timeout: Optional[float] = 30.0,
delay: float = 0.1,
) -> Change:
"""Wait for the given change to be ready.
If the Pebble server supports the /v1/changes/{id}/wait API endpoint,
use that to avoid polling, otherwise poll /v1/changes/{id} every delay
seconds.
Args:
change_id: Change ID of change to wait for.
timeout: Maximum time in seconds to wait for the change to be
ready. It may be None, in which case wait_change never times out.
delay: If polling, this is the delay in seconds between attempts.
Returns:
The Change object being waited on.
Raises:
TimeoutError: If the maximum timeout is reached.
"""
try:
return self._wait_change_using_wait(change_id, timeout)
except NotImplementedError:
# Pebble server doesn't support wait endpoint, fall back to polling
return self._wait_change_using_polling(change_id, timeout, delay)
def _wait_change_using_wait(self, change_id: ChangeID, timeout: Optional[float]):
"""Wait for a change to be ready using the wait-change API."""
deadline = time.time() + timeout if timeout is not None else 0
# Hit the wait endpoint every Client.timeout-1 seconds to avoid long
# requests (the -1 is to ensure it wakes up before the socket timeout)
while True:
this_timeout = max(self.timeout - 1, 1) # minimum of 1 second
if timeout is not None:
time_remaining = deadline - time.time()
if time_remaining <= 0:
break
# Wait the lesser of the time remaining and Client.timeout-1
this_timeout = min(time_remaining, this_timeout)
try:
return self._wait_change(change_id, this_timeout)
except TimeoutError:
# Catch timeout from wait endpoint and loop to check deadline
pass
raise TimeoutError(f'timed out waiting for change {change_id} ({timeout} seconds)')
def _wait_change(self, change_id: ChangeID, timeout: Optional[float] = None) -> Change:
"""Call the wait-change API endpoint directly."""
query = {}
if timeout is not None:
query['timeout'] = _format_timeout(timeout)
try:
resp = self._request('GET', f'/v1/changes/{change_id}/wait', query)
except APIError as e:
if e.code == 404:
raise NotImplementedError('server does not implement wait-change endpoint')
if e.code == 504:
raise TimeoutError(f'timed out waiting for change {change_id} ({timeout} seconds)')
raise
return Change.from_dict(resp['result'])
def _wait_change_using_polling(self, change_id: ChangeID, timeout: Optional[float],
delay: float):
"""Wait for a change to be ready by polling the get-change API."""
deadline = time.time() + timeout if timeout is not None else 0
while timeout is None or time.time() < deadline:
change = self.get_change(change_id)
if change.ready:
return change
time.sleep(delay)
raise TimeoutError(f'timed out waiting for change {change_id} ({timeout} seconds)')
def add_layer(
self, label: str, layer: Union[str, 'LayerDict', Layer], *,
combine: bool = False):
"""Dynamically add a new layer onto the Pebble configuration layers.
If combine is False (the default), append the new layer as the top
layer with the given label. If combine is True and the label already
exists, the two layers are combined into a single one considering the
layer override rules; if the layer doesn't exist, it is added as usual.
"""
if not isinstance(label, str):
raise TypeError(f'label must be a str, not {type(label).__name__}')
if isinstance(layer, str):
layer_yaml = layer
elif isinstance(layer, dict):
layer_yaml = Layer(layer).to_yaml()
elif isinstance(layer, Layer):
layer_yaml = layer.to_yaml()
else:
raise TypeError(
f'layer must be str, dict, or pebble.Layer, not {type(layer).__name__}')
body = {
'action': 'add',
'combine': combine,
'label': label,
'format': 'yaml',
'layer': layer_yaml,
}
self._request('POST', '/v1/layers', body=body)
def get_plan(self) -> Plan:
"""Get the Pebble plan (contains combined layer configuration)."""
resp = self._request('GET', '/v1/plan', {'format': 'yaml'})
return Plan(resp['result'])
def get_services(self, names: Optional[Iterable[str]] = None) -> List[ServiceInfo]:
"""Get the service status for the configured services.
If names is specified, only fetch the service status for the services
named.
"""
query = None
if names is not None:
query = {'names': ','.join(names)}
resp = self._request('GET', '/v1/services', query)
return [ServiceInfo.from_dict(info) for info in resp['result']]
def pull(self,
path: str,
*,
encoding: Optional[str] = 'utf-8') -> Union[BinaryIO, TextIO]:
"""Read a file's content from the remote system.
Args:
path: Path of the file to read from the remote system.
encoding: Encoding to use for decoding the file's bytes to str,
or None to specify no decoding.
Returns:
A readable file-like object, whose read() method will return str
objects decoded according to the specified encoding, or bytes if
encoding is None.
Raises:
PathError: If there was an error reading the file at path, for
example, if the file doesn't exist or is a directory.
"""
query = {
'action': 'read',
'path': path,
}
headers = {'Accept': 'multipart/form-data'}
response = self._request_raw('GET', '/v1/files', query, headers)
options = self._ensure_content_type(response.headers, 'multipart/form-data')
boundary = options.get('boundary', '')
if not boundary:
raise ProtocolError(f'invalid boundary {boundary!r}')
parser = _FilesParser(boundary)
while True:
chunk = response.read(self._chunk_size)
if not chunk:
break
parser.feed(chunk)
resp = parser.get_response()
if resp is None:
raise ProtocolError('no "response" field in multipart body')
self._raise_on_path_error(resp, path)
filenames = parser.filenames()
if not filenames:
raise ProtocolError('no file content in multipart response')
elif len(filenames) > 1:
raise ProtocolError('single file request resulted in a multi-file response')
filename = filenames[0]
if filename != path:
raise ProtocolError(f'path not expected: {filename!r}')
f = parser.get_file(path, encoding)
parser.remove_files()
return f
@staticmethod
def _raise_on_path_error(resp: '_FilesResponse', path: str):
result = resp['result'] or [] # in case it's null instead of []
paths = {item['path']: item for item in result}
if path not in paths:
raise ProtocolError(f'path not found in response metadata: {resp}')
error = paths[path].get('error')
if error:
raise PathError(error['kind'], error['message'])
def push(
self, path: str, source: '_IOSource', *,
encoding: str = 'utf-8', make_dirs: bool = False,
permissions: Optional[int] = None,
user_id: Optional[int] = None,
user: Optional[str] = None,
group_id: Optional[int] = None,
group: Optional[str] = None):
"""Write content to a given file path on the remote system.
Args:
path: Path of the file to write to on the remote system.
source: Source of data to write. This is either a concrete str or
bytes instance, or a readable file-like object.
encoding: Encoding to use for encoding source str to bytes, or
strings read from source if it is a TextIO type. Ignored if
source is bytes or BinaryIO.
make_dirs: If True, create parent directories if they don't exist.
permissions: Permissions (mode) to create file with (Pebble default
is 0o644).
user_id: User ID (UID) for file.
user: Username for file. User's UID must match user_id if both are
specified.
group_id: Group ID (GID) for file.
group: Group name for file. Group's GID must match group_id if
both are specified.
"""
info = self._make_auth_dict(permissions, user_id, user, group_id, group)
info['path'] = path
if make_dirs:
info['make-dirs'] = True
metadata = {
'action': 'write',
'files': [info],
}
data, content_type = self._encode_multipart(metadata, path, source, encoding)
headers = {
'Accept': 'application/json',
'Content-Type': content_type,
}
response = self._request_raw('POST', '/v1/files', None, headers, data)
self._ensure_content_type(response.headers, 'application/json')
resp = json.loads(response.read())
# we need to cast the Dict[Any, Any] to _FilesResponse
self._raise_on_path_error(typing.cast('_FilesResponse', resp), path)
@staticmethod
def _make_auth_dict(permissions: Optional[int],
user_id: Optional[int],
user: Optional[str],
group_id: Optional[int],
group: Optional[str]) -> '_AuthDict':
d: _AuthDict = {}
if permissions is not None:
d['permissions'] = format(permissions, '03o')
if user_id is not None:
d['user-id'] = user_id
if user is not None:
d['user'] = user
if group_id is not None:
d['group-id'] = group_id
if group is not None:
d['group'] = group
return d
def _encode_multipart(self, metadata: Dict[str, Any], path: str,
source: '_IOSource', encoding: str):
# Python's stdlib mime/multipart handling is screwy and doesn't handle
# binary properly, so roll our own.
if isinstance(source, str):
source_io: _AnyStrFileLikeIO = io.StringIO(source)
elif isinstance(source, bytes):
source_io: _AnyStrFileLikeIO = io.BytesIO(source)
else:
source_io: _AnyStrFileLikeIO = source
boundary = binascii.hexlify(os.urandom(16))
path_escaped = path.replace('"', '\\"').encode('utf-8') # NOQA: test_quote_backslashes
content_type = f"multipart/form-data; boundary=\"{boundary.decode('utf-8')}\"" # NOQA: test_quote_backslashes
def generator() -> Generator[bytes, None, None]:
yield b''.join([
b'--', boundary, b'\r\n',
b'Content-Type: application/json\r\n',
b'Content-Disposition: form-data; name="request"\r\n',
b'\r\n',
json.dumps(metadata).encode('utf-8'), b'\r\n',
b'--', boundary, b'\r\n',
b'Content-Type: application/octet-stream\r\n',
b'Content-Disposition: form-data; name="files"; filename="',
path_escaped, b'"\r\n',
b'\r\n',
])
content: Union[str, bytes] = source_io.read(self._chunk_size)
while content:
if isinstance(content, str):
content = content.encode(encoding)
yield content
content = source_io.read(self._chunk_size)
yield b''.join([
b'\r\n',
b'--', boundary, b'--\r\n',
])
return generator(), content_type
def list_files(self, path: str, *, pattern: Optional[str] = None,
itself: bool = False) -> List[FileInfo]:
"""Return list of directory entries from given path on remote system.
Despite the name, this method returns a list of files *and*
directories, similar to :func:`os.listdir` or :func:`os.scandir`.
Args:
path: Path of the directory to list, or path of the file to return
information about.
pattern: If specified, filter the list to just the files that match,
for example ``*.txt``.
itself: If path refers to a directory, return information about the
directory itself, rather than its contents.
"""
query = {
'action': 'list',
'path': path,
}
if pattern:
query['pattern'] = pattern
if itself:
query['itself'] = 'true'
resp = self._request('GET', '/v1/files', query)
result = resp['result'] or [] # in case it's null instead of []
return [FileInfo.from_dict(d) for d in result]
def make_dir(
self, path: str, *, make_parents: bool = False,
permissions: Optional[int] = None,
user_id: Optional[int] = None,
user: Optional[str] = None,
group_id: Optional[int] = None,
group: Optional[str] = None):
"""Create a directory on the remote system with the given attributes.
Args:
path: Path of the directory to create on the remote system.
make_parents: If True, create parent directories if they don't exist.
permissions: Permissions (mode) to create directory with (Pebble
default is 0o755).
user_id: User ID (UID) for directory.
user: Username for directory. User's UID must match user_id if
both are specified.
group_id: Group ID (GID) for directory.
group: Group name for directory. Group's GID must match group_id
if both are specified.
"""
info = self._make_auth_dict(permissions, user_id, user, group_id, group)
info['path'] = path
if make_parents:
info['make-parents'] = True
body = {
'action': 'make-dirs',
'dirs': [info],
}
resp = self._request('POST', '/v1/files', None, body)
self._raise_on_path_error(typing.cast('_FilesResponse', resp), path)
def remove_path(self, path: str, *, recursive: bool = False):
"""Remove a file or directory on the remote system.
Args:
path: Path of the file or directory to delete from the remote system.
recursive: If True, and path is a directory, recursively deletes it and
everything under it. If the path is a file, delete the file and
do nothing if the file is non-existent. Behaviourally similar
to ``rm -rf <file|dir>``.
"""
info: Dict[str, Any] = {'path': path}
if recursive:
info['recursive'] = True
body = {
'action': 'remove',
'paths': [info],
}
resp = self._request('POST', '/v1/files', None, body)
self._raise_on_path_error(typing.cast('_FilesResponse', resp), path)
# Exec I/O is str if encoding is provided (the default)
@typing.overload
def exec( # noqa
self,
command: List[str],
*,
service_context: Optional[str] = None,
environment: Optional[Dict[str, str]] = None,
working_dir: Optional[str] = None,
timeout: Optional[float] = None,
user_id: Optional[int] = None,
user: Optional[str] = None,
group_id: Optional[int] = None,
group: Optional[str] = None,
stdin: Optional[Union[str, TextIO]] = None,
stdout: Optional[TextIO] = None,
stderr: Optional[TextIO] = None,
encoding: str = 'utf-8',
combine_stderr: bool = False
) -> ExecProcess[str]:
...
# Exec I/O is bytes if encoding is explicitly set to None
@typing.overload
def exec( # noqa
self,
command: List[str],
*,
service_context: Optional[str] = None,
environment: Optional[Dict[str, str]] = None,
working_dir: Optional[str] = None,
timeout: Optional[float] = None,
user_id: Optional[int] = None,
user: Optional[str] = None,
group_id: Optional[int] = None,
group: Optional[str] = None,
stdin: Optional[Union[bytes, BinaryIO]] = None,
stdout: Optional[BinaryIO] = None,
stderr: Optional[BinaryIO] = None,
encoding: None = None,
combine_stderr: bool = False
) -> ExecProcess[bytes]:
...
def exec(
self,
command: List[str],
*,
service_context: Optional[str] = None,
environment: Optional[Dict[str, str]] = None,
working_dir: Optional[str] = None,
timeout: Optional[float] = None,
user_id: Optional[int] = None,
user: Optional[str] = None,
group_id: Optional[int] = None,
group: Optional[str] = None,
stdin: Optional[Union[str, bytes, TextIO, BinaryIO]] = None,
stdout: Optional[Union[TextIO, BinaryIO]] = None,
stderr: Optional[Union[TextIO, BinaryIO]] = None,
encoding: Optional[str] = 'utf-8',
combine_stderr: bool = False
) -> ExecProcess[Any]:
r"""Execute the given command on the remote system.
Two method signatures are shown because this method returns an
:class:`ExecProcess` that deals with strings if ``encoding`` is
specified (the default), or one that deals with bytes if ``encoding``
is set to ``None``.
Most of the parameters are explained in the "Parameters" section
below, however, input/output handling is a bit more complex. Some
examples are shown below::
# Simple command with no output; just check exit code
>>> process = client.exec(['send-emails'])
>>> process.wait()
# Fetch output as string
>>> process = client.exec(['python3', '--version'])
>>> version, _ = process.wait_output()
>>> print(version)
Python 3.8.10
# Fetch both stdout and stderr as strings
>>> process = client.exec(['pg_dump', '-s', ...])
>>> schema, logs = process.wait_output()
# Stream input from a string and write output to files
>>> stdin = 'foo\nbar\n'
>>> with open('out.txt', 'w') as out, open('err.txt', 'w') as err:
... process = client.exec(['awk', '{ print toupper($0) }'],
... stdin=stdin, stdout=out, stderr=err)
... process.wait()
>>> open('out.txt').read()
'FOO\nBAR\n'
>>> open('err.txt').read()
''
# Real-time streaming using ExecProcess.stdin and ExecProcess.stdout
>>> process = client.exec(['cat'])
>>> def stdin_thread():
... for line in ['one\n', '2\n', 'THREE\n']:
... process.stdin.write(line)
... process.stdin.flush()
... time.sleep(1)
... process.stdin.close()
...
>>> threading.Thread(target=stdin_thread).start()
>>> for line in process.stdout:
... print(datetime.datetime.now().strftime('%H:%M:%S'), repr(line))
...
16:20:26 'one\n'
16:20:27 '2\n'
16:20:28 'THREE\n'
>>> process.wait() # will return immediately as stdin was closed
# Show exception raised for non-zero return code
>>> process = client.exec(['ls', 'notexist'])
>>> out, err = process.wait_output()
Traceback (most recent call last):
...
ExecError: "ls" returned exit code 2
>>> exc = sys.last_value
>>> exc.exit_code
2
>>> exc.stdout
''
>>> exc.stderr
"ls: cannot access 'notfound': No such file or directory\n"
Args:
command: Command to execute: the first item is the name (or path)
of the executable, the rest of the items are the arguments.
service_context: If specified, run the command in the context of
this service. Specifically, inherit its environment variables,
user/group settings, and working directory. The other exec
options will override the service context; ``environment``
will be merged on top of the service's.
environment: Environment variables to pass to the process.
working_dir: Working directory to run the command in. If not set,
Pebble uses the target user's $HOME directory (and if the user
argument is not set, $HOME of the user Pebble is running as).
timeout: Timeout in seconds for the command execution, after which
the process will be terminated. If not specified, the
execution never times out.
user_id: User ID (UID) to run the process as.
user: Username to run the process as. User's UID must match
user_id if both are specified.
group_id: Group ID (GID) to run the process as.
group: Group name to run the process as. Group's GID must match
group_id if both are specified.
stdin: A string or readable file-like object that is sent to the
process's standard input. If not set, the caller can write
input to :attr:`ExecProcess.stdin` to stream input to the
process.
stdout: A writable file-like object that the process's standard
output is written to. If not set, the caller can use
:meth:`ExecProcess.wait_output` to capture output as a string,
or read from :meth:`ExecProcess.stdout` to stream output from
the process.
stderr: A writable file-like object that the process's standard
error is written to. If not set, the caller can use
:meth:`ExecProcess.wait_output` to capture error output as a
string, or read from :meth:`ExecProcess.stderr` to stream
error output from the process. Must be None if combine_stderr
is True.
encoding: If encoding is set (the default is UTF-8), the types
read or written to stdin/stdout/stderr are str, and encoding
is used to encode them to bytes. If encoding is None, the
types read or written are raw bytes.
combine_stderr: If True, process's stderr output is combined into
its stdout (the stderr argument must be None). If False,
separate streams are used for stdout and stderr.
Returns:
A Process object representing the state of the running process.
To wait for the command to finish, the caller will typically call
:meth:`ExecProcess.wait` if stdout/stderr were provided as
arguments to :meth:`exec`, or :meth:`ExecProcess.wait_output` if
not.
"""
if not isinstance(command, list) or not all(isinstance(s, str) for s in command):
raise TypeError(f'command must be a list of str, not {type(command).__name__}')
if len(command) < 1:
raise ValueError('command must contain at least one item')
if stdin is not None:
if isinstance(stdin, str):
if encoding is None:
raise ValueError('encoding must be set if stdin is str')
stdin = io.BytesIO(stdin.encode(encoding))
elif isinstance(stdin, bytes):
if encoding is not None:
raise ValueError('encoding must be None if stdin is bytes')
stdin = io.BytesIO(stdin)
elif not hasattr(stdin, 'read'):
raise TypeError('stdin must be str, bytes, or a readable file-like object')
if combine_stderr and stderr is not None:
raise ValueError('stderr must be None if combine_stderr is True')
body = {
'command': command,
'service-context': service_context,
'environment': environment or {},
'working-dir': working_dir,
'timeout': _format_timeout(timeout) if timeout is not None else None,
'user-id': user_id,
'user': user,
'group-id': group_id,
'group': group,
'split-stderr': not combine_stderr,
}
resp = self._request('POST', '/v1/exec', body=body)
change_id = resp['change']
task_id = resp['result']['task-id']
stderr_ws: Optional['_WebSocket'] = None
try:
control_ws = self._connect_websocket(task_id, 'control')
stdio_ws = self._connect_websocket(task_id, 'stdio')
if not combine_stderr:
stderr_ws = self._connect_websocket(task_id, 'stderr')
except websocket.WebSocketException as e: # type: ignore
# Error connecting to websockets, probably due to the exec/change
# finishing early with an error. Call wait_change to pick that up.
change = self.wait_change(ChangeID(change_id))
if change.err:
raise ChangeError(change.err, change)
raise ConnectionError(f'unexpected error connecting to websockets: {e}')
cancel_stdin: Optional[Callable[[], None]] = None
cancel_reader: Optional[int] = None
threads: List[threading.Thread] = []
if stdin is not None:
if _has_fileno(stdin):
# Create a pipe so _reader_to_websocket can select() on the
# reader as well as this cancel_reader; when we write anything
# to cancel_writer it'll trigger the select and end the thread.
cancel_reader, cancel_writer = os.pipe()
def _cancel_stdin():
os.write(cancel_writer, b'x') # doesn't matter what we write
os.close(cancel_writer)
cancel_stdin = _cancel_stdin
t = _start_thread(_reader_to_websocket, stdin, stdio_ws, encoding, cancel_reader)
threads.append(t)
process_stdin = None
else:
process_stdin = _WebsocketWriter(stdio_ws)
if encoding is not None:
process_stdin = io.TextIOWrapper(
process_stdin, encoding=encoding, newline='') # type: ignore
if stdout is not None:
t = _start_thread(_websocket_to_writer, stdio_ws, stdout, encoding)
threads.append(t)
process_stdout = None
else:
process_stdout = _WebsocketReader(stdio_ws)
if encoding is not None:
process_stdout = io.TextIOWrapper(
process_stdout, encoding=encoding, newline='') # type: ignore
process_stderr = None
if not combine_stderr:
if stderr is not None:
t = _start_thread(_websocket_to_writer, stderr_ws, stderr, encoding)
threads.append(t)
else:
ws = typing.cast('_WebSocket', stderr_ws)
process_stderr = _WebsocketReader(ws)
if encoding is not None:
process_stderr = io.TextIOWrapper(
process_stderr, encoding=encoding, newline='') # type: ignore
process: ExecProcess[Any] = ExecProcess(
stdin=process_stdin, # type: ignore
stdout=process_stdout, # type: ignore
stderr=process_stderr, # type: ignore
client=self,
timeout=timeout,
stdio_ws=stdio_ws,
stderr_ws=stderr_ws,
control_ws=control_ws,
command=command,
encoding=encoding,
change_id=ChangeID(change_id),
cancel_stdin=cancel_stdin,
cancel_reader=cancel_reader,
threads=threads,
)
return process
def _connect_websocket(self, task_id: str, websocket_id: str) -> '_WebSocket':
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socket_path)
url = self._websocket_url(task_id, websocket_id)
ws: '_WebSocket' = websocket.WebSocket(skip_utf8_validation=True) # type: ignore
ws.connect(url, socket=sock)
return ws
def _websocket_url(self, task_id: str, websocket_id: str) -> str:
base_url = self.base_url.replace('http://', 'ws://')
url = f'{base_url}/v1/tasks/{task_id}/websocket/{websocket_id}'
return url
def send_signal(self, sig: Union[int, str], services: Iterable[str]):
"""Send the given signal to the list of services named.
Args:
sig: Name or number of signal to send, for example ``"SIGHUP"``, ``1``, or
``signal.SIGHUP``.
services: Non-empty list of service names to send the signal to.
Raises:
APIError: If any of the services are not in the plan or are not
currently running.
"""
if isinstance(services, (str, bytes)) or not hasattr(services, '__iter__'):
raise TypeError('services must be of type Iterable[str], '
'not {}'.format(type(services).__name__))
for s in services:
if not isinstance(s, str):
raise TypeError(f'service names must be str, not {type(s).__name__}')
if isinstance(sig, int):
sig = signal.Signals(sig).name
body = {
'signal': sig,
'services': services,
}
self._request('POST', '/v1/signals', body=body)
def get_checks(
self,
level: Optional[CheckLevel] = None,
names: Optional[Iterable[str]] = None
) -> List[CheckInfo]:
"""Get the check status for the configured checks.
Args:
level: Optional check level to query for (default is to fetch
checks with any level).
names: Optional list of check names to query for (default is to
fetch all checks).
Returns:
List of :class:`CheckInfo` objects.
"""
query = {}
if level is not None:
query['level'] = level.value
if names:
query['names'] = list(names)
resp = self._request('GET', '/v1/checks', query)
return [CheckInfo.from_dict(info) for info in resp['result']]
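# --- Hedged end-to-end usage sketch (not part of the original module) -------
# The socket path is an assumption: Pebble's default is $PEBBLE/.pebble.socket,
# and inside Juju workload containers it is /charm/containers/<name>/pebble.socket.
def _example_client_usage():
    client = Client('/charm/containers/app/pebble.socket')
    # Add (or merge) a service layer, then start startup-enabled services.
    client.add_layer('app', {
        'summary': 'app layer',
        'services': {
            'app': {'override': 'replace', 'command': 'sleep 100',
                    'startup': 'enabled'},
        },
    }, combine=True)
    client.replan_services()
    # Write a config file into the container, then read it back.
    client.push('/etc/app.conf', 'verbose = true\n', make_dirs=True)
    with client.pull('/etc/app.conf') as f:
        print(f.read())
    # Run a one-off command and capture its output as a string.
    out, _ = client.exec(['echo', 'hello']).wait_output()
    assert out == 'hello\n'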
class _FilesParser:
"""A limited purpose multi-part parser backed by files for memory efficiency."""
def __init__(self, boundary: Union[bytes, str]):
self._response: Optional[_FilesResponse] = None # externally managed
self._part_type: Optional[Literal["response", "files"]] = None # externally managed
self._headers: Optional[email.message.Message] = None # externally managed
self._files: Dict[str, _Tempfile] = {}
# Prepare the MIME multipart boundary line patterns.
if isinstance(boundary, str):
boundary = boundary.encode()
# State vars, as we may enter the feed() function multiple times.
self._response_data = bytearray()
self._max_lookahead = 8 * 1024 * 1024
self._parser = _MultipartParser(
boundary,
self._process_header,
self._process_body,
max_lookahead=self._max_lookahead)
# RFC 2046 says that the boundary string needs to be preceded by a CRLF.
# Unfortunately, the request library's header parsing logic strips off one of
# these, so we'll prime the parser buffer with that missing sequence.
self._parser.feed(b'\r\n')
def _process_header(self, data: bytes):
parser = email.parser.BytesFeedParser()
parser.feed(data)
self._headers = parser.close()
content_disposition = self._headers.get_content_disposition()
if content_disposition != 'form-data':
raise ProtocolError(
f'unexpected content disposition: {content_disposition!r}')
name = self._headers.get_param('name', header='content-disposition')
if name == 'files':
filename = self._headers.get_filename()
if filename is None:
raise ProtocolError('multipart "files" part missing filename')
self._prepare_tempfile(filename)
elif name != 'response':
raise ProtocolError(
f'unexpected name in content-disposition header: {name!r}')
self._part_type = typing.cast('Literal["response", "files"]', name)
def _process_body(self, data: bytes, done: bool = False):
if self._part_type == 'response':
self._response_data.extend(data)
if done:
if len(self._response_data) > self._max_lookahead:
raise ProtocolError('response end marker not found')
resp = json.loads(self._response_data.decode())
self._response = typing.cast('_FilesResponse', resp)
self._response_data = bytearray()
elif self._part_type == 'files':
if done:
# This is the final write.
outfile = self._get_open_tempfile()
outfile.write(data)
outfile.close()
self._headers = None
else:
# Not the end of file data yet. Don't open/close file for intermediate writes
outfile = self._get_open_tempfile()
outfile.write(data)
def remove_files(self):
"""Remove all temporary files on disk."""
for file in self._files.values():
os.unlink(file.name)
self._files.clear()
def feed(self, data: bytes):
"""Provide more data to the running parser."""
self._parser.feed(data)
def _prepare_tempfile(self, filename: str):
tf = tempfile.NamedTemporaryFile(delete=False)
self._files[filename] = tf # type: ignore # we have a custom protocol for it
self.current_filename = filename
def _get_open_tempfile(self):
return self._files[self.current_filename]
def get_response(self) -> Optional['_FilesResponse']:
"""Return the deserialized JSON object from the multipart "response" field."""
return self._response
def filenames(self):
"""Return a list of filenames from the "files" parts of the response."""
return list(self._files.keys())
def get_file(self, path: str, encoding: Optional[str]) -> '_TextOrBinaryIO':
"""Return an open file object containing the data."""
mode = 'r' if encoding else 'rb'
# We're using text-based file I/O purely for file encoding purposes, not for
# newline normalization. newline='' serves the line endings as-is.
newline = '' if encoding else None
file_io = open(self._files[path].name, mode,
encoding=encoding, newline=newline)
# open() returns IO[Any]
return typing.cast('_TextOrBinaryIO', file_io)
class _MultipartParser:
def __init__(
self,
marker: bytes,
handle_header: '_HeaderHandler',
handle_body: '_BodyHandler',
max_lookahead: int = 0,
max_boundary_length: int = 0):
r"""Configures a parser for mime multipart messages.
Args:
marker: the multipart boundary marker (i.e. in "\r\n--<marker>--\r\n")
handle_header(data): called once with the entire contents of a part
header as encountered in data fed to the parser
handle_body(data, done=False): called incrementally as part body
data is fed into the parser - its "done" parameter is set to true when
the body is complete.
max_lookahead: maximum number of bytes to buffer when searching for a complete header.
max_boundary_length: maximum number of bytes that can make up a part
boundary (e.g. \r\n--<marker>--\r\n")
"""
self._marker = marker
self._handle_header = handle_header
self._handle_body = handle_body
self._max_lookahead = max_lookahead
self._max_boundary_length = max_boundary_length
self._buf = bytearray()
self._pos = 0 # current position in buf
self._done = False # whether we have found the terminal boundary and are done parsing
self._header_terminator = b'\r\n\r\n'
# RFC 2046 notes optional "linear whitespace" (e.g. [ \t]+) after the boundary pattern
# and the optional "--" suffix. The boundaries strings can be constructed as follows:
#
# boundary = \r\n--<marker>[ \t]+\r\n
# terminal_boundary = \r\n--<marker>--[ \t]+\r\n
#
# 99 is arbitrarily chosen to represent a max number of linear
# whitespace characters to help avoid wrongly writing boundary
# characters into a (temporary) file.
if not max_boundary_length:
self._max_boundary_length = len(b'\r\n--' + marker + b'--\r\n') + 99
def feed(self, data: bytes):
"""Feeds data incrementally into the parser."""
if self._done:
return
self._buf.extend(data)
while True:
# seek to a boundary if we aren't already on one
i, n, self._done = _next_part_boundary(self._buf, self._marker)
if i == -1 or self._done:
return # waiting for more data or terminal boundary reached
if self._pos == 0:
# parse the part header
if self._max_lookahead and len(self._buf) - self._pos > self._max_lookahead:
raise ProtocolError('header terminator not found')
term_index = self._buf.find(self._header_terminator)
if term_index == -1:
return # waiting for more data
start = i + n
# data includes the double CRLF at the end of the header.
end = term_index + len(self._header_terminator)
self._handle_header(self._buf[start:end])
self._pos = end
else:
# parse the part body
ii, _, self._done = _next_part_boundary(self._buf, self._marker, start=self._pos)
safe_bound = max(0, len(self._buf) - self._max_boundary_length)
if ii != -1:
# part body is finished
self._handle_body(self._buf[self._pos:ii], done=True)
self._buf = self._buf[ii:]
self._pos = 0
if self._done:
return # terminal boundary reached
elif safe_bound > self._pos:
# write partial body data
data = self._buf[self._pos:safe_bound]
self._pos = safe_bound
self._handle_body(data)
return # waiting for more data
else:
return # waiting for more data
def _next_part_boundary(buf: bytes, marker: bytes, start: int = 0
) -> Tuple[int, int, bool]:
"""Returns the index of the next boundary marker in buf beginning at start.
Returns:
(index, length, is_terminal) or (-1, -1, False) if no boundary is found.
"""
prefix = b'\r\n--' + marker
suffix = b'\r\n'
terminal_midfix = b'--'
i = buf.find(prefix, start)
if i == -1:
return -1, -1, False
pos = i + len(prefix)
is_terminal = False
if buf[pos:].startswith(terminal_midfix):
is_terminal = True
pos += len(terminal_midfix)
# Note: RFC 2046 notes optional "linear whitespace" (e.g. [ \t]) after the boundary pattern
# and the optional "--" suffix.
tail = buf[pos:]
for c in tail:
if c not in b' \t':
break
pos += 1
if buf[pos:].startswith(suffix):
pos += len(suffix)
return i, pos - i, is_terminal
return -1, -1, False
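# Illustrative check (not part of the original module): feed a two-part
# multipart body through _MultipartParser in deliberately tiny chunks and
# collect each part's body via the callbacks.
def _example_multipart_parse():
    headers: List[bytes] = []
    chunks: List[bytearray] = []

    def on_header(data: bytes):
        headers.append(bytes(data))
        chunks.append(bytearray())  # start accumulating a new part body

    def on_body(data: bytes, done: bool = False):
        chunks[-1].extend(data)

    parser = _MultipartParser(b'xyz', on_header, on_body)
    payload = (b'\r\n--xyz\r\nContent-Disposition: form-data; name="a"\r\n\r\n'
               b'first'
               b'\r\n--xyz\r\nContent-Disposition: form-data; name="b"\r\n\r\n'
               b'second'
               b'\r\n--xyz--\r\n')
    for i in range(0, len(payload), 7):  # simulate incremental network reads
        parser.feed(payload[i:i + 7])
    assert len(headers) == 2
    assert [bytes(c) for c in chunks] == [b'first', b'second']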
|
069ff7613ab55f78c90d8cedd6ab11172c358bc6
|
dcfc88503e3a8df5d9083b512178d254727d1a31
|
/axelrod/strategies/human.py
|
190590c124a64cd65154fbe395b1f9bc3892c7b8
|
[
"MIT"
] |
permissive
|
Axelrod-Python/Axelrod
|
b8502822da103fbf1a56ffbc090453b95bf9f2d8
|
fa748627cd4f0333bb2dbfcb1454372a78a9098a
|
refs/heads/dev
| 2023-09-04T06:41:55.216809
| 2023-07-10T19:42:54
| 2023-07-14T02:37:16
| 30,959,449
| 673
| 289
|
NOASSERTION
| 2023-07-14T02:37:18
| 2015-02-18T09:37:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,783
|
py
|
human.py
|
from os import linesep
from axelrod.action import Action
from axelrod.player import Player
from prompt_toolkit import prompt
from prompt_toolkit.validation import ValidationError, Validator
try: # pragma: no cover
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.token import Token
token_toolbar = Token.Toolbar
bottom_toolbar_name = "get_bottom_toolbar_tokens"
PROMPT2 = False
except ImportError: # prompt_toolkit v2
from prompt_toolkit.styles import Style
style_from_dict = Style.from_dict
token_toolbar = "pygments.toolbar"
bottom_toolbar_name = "bottom_toolbar"
PROMPT2 = True
C, D = Action.C, Action.D
toolbar_style = style_from_dict({token_toolbar: "#ffffff bg:#333333"})
class ActionValidator(Validator):
"""
A class to validate input from prompt_toolkit.prompt
Described at http://python-prompt-toolkit.readthedocs.io/en/latest/pages/building_prompts.html#input-validation
"""
def validate(self, document) -> None:
text = document.text
if text and text.upper() not in ["C", "D"]:
raise ValidationError(
message="Action must be C or D", cursor_position=0
)
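# Quick illustrative check (not part of the original module), wrapping raw
# text in prompt_toolkit's Document the way the prompt machinery would:
def _example_validator():
    from prompt_toolkit.document import Document
    ActionValidator().validate(Document("c"))  # accepted: C or D, case-insensitive
    try:
        ActionValidator().validate(Document("x"))
    except ValidationError:
        pass  # anything other than C or D is rejected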
class Human(Player):
"""
A strategy that prompts for keyboard input rather than deriving its own
action.
This strategy is intended to be used interactively by a user playing
against other strategies from within the rest of the library. Unlike
other strategies, it is designed to be a teaching aid rather than a
research tool.
"""
name = "Human"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": True,
"inspects_source": True,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, name="human", c_symbol="C", d_symbol="D"):
"""
Parameters
----------
name: string
The name of the human player
c_symbol: string
A symbol to denote cooperation within the history toolbar
and prompt
d_symbol: string
A symbol to denote defection within the history toolbar
and prompt
"""
super().__init__()
self.human_name = name
self.symbols = {C: c_symbol, D: d_symbol}
def _history_toolbar(self):
"""
A prompt-toolkit function to define the bottom toolbar.
Described at http://python-prompt-toolkit.readthedocs.io/en/latest/pages/building_prompts.html#adding-a-bottom-toolbar
"""
my_history = [self.symbols[action] for action in self.history]
opponent_history = [
self.symbols[action] for action in self.history.coplays
]
history = list(zip(my_history, opponent_history))
if self.history:
content = "History ({}, opponent): {}".format(
self.human_name, history
)
else:
content = ""
return content
def _status_messages(self):
"""
A method to define the messages printed to the console and
displayed in the prompt-toolkit bottom toolbar.
The bottom toolbar is defined only if a match is in progress.
The console print statement is either the result of the previous
turn or a message indicating that a new match is starting.
Returns
-------
dict
mapping print or toolbar to the relevant string
"""
if self.history:
toolbar = (
self._history_toolbar
if PROMPT2
else lambda cli: [(token_toolbar, self._history_toolbar())]
)
print_statement = (
"{}Turn {}: {} played {}, opponent played {}".format(
linesep,
len(self.history),
self.human_name,
self.symbols[self.history[-1]],
self.symbols[self.history.coplays[-1]],
)
)
else:
toolbar = None
print_statement = "{}Starting new match".format(linesep)
return {"toolbar": toolbar, "print": print_statement}
def _get_human_input(self) -> Action: # pragma: no cover
"""
A method to prompt the user for input, validate it and display
the bottom toolbar.
Returns
-------
string
Uppercase C or D indicating the action to play
"""
action = prompt(
"Turn {} action [C or D] for {}: ".format(
len(self.history) + 1, self.human_name
),
validator=ActionValidator(),
style=toolbar_style,
**{bottom_toolbar_name: self.status_messages["toolbar"]},
)
return Action.from_char(action.upper())
def strategy(self, opponent: Player, input_function=None):
"""
Ordinarily, the strategy prompts for keyboard input rather than
deriving its own action.
However, it is also possible to pass a function which returns a valid
action. This is mainly used for testing purposes in order to by-pass
the need for human interaction.
"""
self.status_messages = self._status_messages()
print(self.status_messages["print"])
if not input_function: # pragma: no cover
action = self._get_human_input()
else:
action = input_function()
return action
def __repr__(self):
"""
Override the default __repr__ of the class
"""
return "Human: {}".format(self.human_name)
|
2f465a524c9277642430f129e11bebe46281152e
|
fba876caecb7a55254cf92434a9a8a629ed47b93
|
/apps/challenges/migrations/0088_increase_cpu_workers_and_memory.py
|
8e76b06c4af1507295e39890fc81bcb3a9d328f2
|
[
"BSD-3-Clause"
] |
permissive
|
Cloud-CV/EvalAI
|
f6eb96509f679cb5765fd4b4a49e5b3f5a5551d6
|
7e3485f2f3c77b146b72cbbc8de1b15bf0dfe0db
|
refs/heads/master
| 2023-09-04T05:03:59.087293
| 2023-08-30T19:26:13
| 2023-08-30T19:26:13
| 71,516,397
| 1,722
| 983
|
NOASSERTION
| 2023-09-07T18:02:48
| 2016-10-21T00:51:45
|
Python
|
UTF-8
|
Python
| false
| false
| 633
|
py
|
0088_increase_cpu_workers_and_memory.py
|
# Generated by Django 2.2.20 on 2022-10-04 16:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('challenges', '0087_add_cpu_only_code_upload_job_configs'),
]
operations = [
migrations.AlterField(
model_name='challenge',
name='worker_cpu_cores',
field=models.IntegerField(blank=True, default=512, null=True),
),
migrations.AlterField(
model_name='challenge',
name='worker_memory',
field=models.IntegerField(blank=True, default=1024, null=True),
),
]
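# Hedged sketch (not part of the original migration): applying it
# programmatically via Django's call_command, equivalent to
# `python manage.py migrate challenges 0088`. The function name is
# hypothetical, and Django settings must already be configured.
def apply_worker_resource_migration():
    from django.core.management import call_command
    call_command('migrate', 'challenges', '0088')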
|
c593b28bc230b025e173b05c11b1df465518fe01
|
0010b3d8b8f806d6065e1bb1aa3c18f9714001a7
|
/devel/external/AEGIS/clean_pstamp.py
|
3464e65b670c1fdab4de97f8e60e34101b53db8c
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
GalSim-developers/GalSim
|
bfd2d5e57f20874ad81bc735195c5c62efad63eb
|
f1c0319600cc713373f1cea7459171fbf388848e
|
refs/heads/main
| 2023-08-17T07:30:44.583679
| 2023-08-15T02:52:00
| 2023-08-15T02:52:00
| 3,510,804
| 194
| 104
|
NOASSERTION
| 2023-09-12T04:03:38
| 2012-02-22T02:51:45
|
Python
|
UTF-8
|
Python
| false
| false
| 14,430
|
py
|
clean_pstamp.py
|
# Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
"""Program Number: 4
Identifies multiple objects in the postage stamp of a galaxy and replaces
the other object with noise.
Requirements:
postage stamp of galaxy, corresponding segmentation map, noise file
Identification:
The segmentation map corresponding to the object is used to divide the
postage stamp pixels into those belonging to the main galaxy, other
objects and background. The pixels belonging to other objects are replaced
by pixels from the noise file.
Replace pixels:
The other object pixels are replaced with a region of noise map with the same
dimensions as the other object. This is done so as to preserve noise correlations.
The noise pixel values are divided by their standard deviation and multiplied
by the standard deviation of the background pixels of the postage stamp.
Stamp Stats:
Some values are saved so that the GalSim COSMOSCatalog class may impose
selection criteria on the quality of the postage stamps.
"""
from astropy.table import Table
import pyfits
import os
import subprocess
import numpy as np
import galsim
class Main_param:
"""Class containing parameters to pass to run analysis on each galaxy file."""
def __init__(self, args):
self.seg_id = args.seg_id
self.num = args.num
self.path = args.main_path + '/' + self.seg_id + '/postage_stamps/'
self.filters = args.filter_names
self.file_filter_name = args.file_filter_name
string = args.main_string.replace('segid', self.seg_id)
string1 = string.replace('num', self.num)
self.gal_files, self.noise_file = {}, {}
self.seg_files, self.cat_files = {}, {}
n = len(self.filters)
for i in range(n):
filter1 = self.filters[i]
string2 = string1.replace('filter', filter1)
self.gal_files[filter1] = self.path + string2 + args.image_string
self.seg_files[filter1] = self.path + string2 + args.seg_string
string3 = args.noise_file.replace('filter', args.file_filter_name[i])
self.noise_file[filter1] = args.main_path + '/' + string3
string4 = args.cat_file.replace('filter', args.filter_names[i])
self.cat_files[filter1] = args.main_path + '/' + self.seg_id + '/' + string4
def div_pixels(seg_map, num):
"""Get pixels that belong to image, other objects, background from
segmentation map
"""
s = seg_map.shape
xs = range(s[0])
ys = range(s[1])
bl = []
oth = {}
oth_segs = []
im = []
check = 0
for x in xs:
for y in ys:
if seg_map[x, y] == 0:
# background pixel
bl.append([x, y])
elif seg_map[x, y] == int(num) + 1:
# image pixel
im.append([x, y])
else:
# other object
if str(seg_map[x, y]) in oth:
oth[str(seg_map[x, y])].append([x, y])
else:
oth[str(seg_map[x, y])] = [[x, y]]
oth_segs.append(str(seg_map[x, y]))
if seg_map[s[0] / 2, s[1] / 2] != int(num) + 1:
check = 1
return im, bl, oth, oth_segs, check
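# Tiny illustrative check (not part of the original script) of div_pixels on a
# 3x3 segmentation map where the central object has id num + 1 = 2 and there
# is one other object (id 3).
def _example_div_pixels():
    seg = np.array([[0, 0, 3],
                    [0, 2, 2],
                    [0, 0, 0]])
    im, bl, oth, oth_segs, check = div_pixels(seg, num='1')
    assert im == [[1, 1], [1, 2]]
    assert oth == {'3': [[0, 2]]}
    assert check == 0  # central pixel belongs to the main galaxy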
def get_stats(arr, str=None):
"""Returns mean and stdev of arr """
mean = np.mean(arr)
std = np.std(arr)
if str:
print 'Measuring', str
print 'STATS: mean=', mean, ' stdev=', std
return mean, std
def get_avg_around_pix(x0, y0, arr):
"""Returns average values of pixels around (x0,y0) in arr"""
x, y = [x0], [y0]
if x0 > 0:
x.append(x0 - 1)
if arr.shape[0] - 1 > x0:
x.append(x0 + 1)
if y0 > 0:
y.append(y0 - 1)
if arr.shape[1] - 1 > y0:
y.append(y0 + 1)
neighb = [arr[i][j] for i in x for j in y]
avg = np.mean(neighb)
return avg
def get_snr(image_data, b_var, hlr):
"""Returns SNR of shape measurement"""
img = galsim.Image(image_data)
try:
new_params = galsim.hsm.HSMParams(max_amoment=5.0e15,
max_mom2_iter=20000,
convergence_threshold=1.e-5)
res = galsim.hsm.FindAdaptiveMom(img, hsmparams=new_params,
guess_sig=hlr * 2.5)
aperture_noise = float(np.sqrt(b_var * 2. * np.pi * (res.moments_sigma**2)))
sn_ellip_gauss = res.moments_amp / aperture_noise
print 'RES', res.moments_amp, res.moments_sigma
print 'SNR', sn_ellip_gauss
except:
print 'SNR manually set'
sn_ellip_gauss = -10.
print 'SNR', sn_ellip_gauss
return sn_ellip_gauss
def get_min_dist(x0, y0, arr):
"""Returns minimum distance between points in arr and (x0,y0) """
dist = np.hypot(arr.T[0] - x0, arr.T[1] - y0)
min_dist = np.min(dist)
val = np.argmin(dist)
return min_dist, arr[val]
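# Illustrative check (not part of the original script): the nearer of two
# pixels to the origin is (1, 1), at distance sqrt(2).
def _example_get_min_dist():
    arr = np.array([[3, 4], [1, 1]])
    min_dist, nearest = get_min_dist(0, 0, arr)
    assert abs(min_dist - np.sqrt(2)) < 1e-12
    assert list(nearest) == [1, 1]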
def get_blank_reg(x_r, y_r, noise_file):
"""Returns rectangular randomly picked region of size x_r * y_r from the
noise_file"""
hdu = pyfits.open(noise_file)
bl_dat = hdu[0].data
hdu.close()
s = bl_dat.shape
print "x_r", x_r, 's', s
x0_min = np.random.randint(s[0] - x_r)
y0_min = np.random.randint(s[1] - y_r)
x0_max = x0_min + x_r + 1
y0_max = y0_min + y_r + 1
empty = bl_dat[x0_min:x0_max, y0_min:y0_max]
bl_mean, bl_std = get_stats(bl_dat, str='Blank region from file')
return empty, bl_std
def change_others(arr, to_change,
noise_file, b_std):
"""Change pixels of other object to background
@ arr Postage stamp image of galaxy
@ to_change coordinates of pixels that will be replaced with noise
@ noise_file File with noise pixels
@ b_std Std dev of background pixels of pstamp
"""
xmin, xmax = np.min(to_change.T[0]), np.max(to_change.T[0])
ymin, ymax = np.min(to_change.T[1]), np.max(to_change.T[1])
xr0 = xmax - xmin
yr0 = ymax - ymin
    # Get noise pixels in a rectangle of size comparable to the region that
    # needs to be replaced
    bl_dat, bl_std = get_blank_reg(xr0, yr0, noise_file)
    # Shift coordinates of the pixels to change so they start at (0, 0)
bl_change = np.array([to_change.T[0] - xmin, to_change.T[1] - ymin]).T
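    # Rescale the noise so its standard deviation matches that of the postage
    # stamp background: noise * (b_std / bl_std)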
bl_dat = bl_dat / bl_std * b_std
# change pixels of oth in arr to blank value
for p in range(len(to_change)):
arr[to_change[p][0], to_change[p][1]] = bl_dat[bl_change[p][0], bl_change[p][1]]
return arr
def clean_pstamp(args):
"""If a postage stamp has an object other the central galaxy, the other object
is replaced by noise. The postage stamp of image must all have a
segmentation map. A value other than the id number of the image+1 is
replaced in the segmentation map is detected as other object. Other object
pixels in the image postage stamp is replaced by pixels from a noise file.
The input noise file should represent noise background expected in the
image. The replaced noise pixels are normalized by the standard deviation
of background pixels in the postage stamp image
Output: Creates new postage stamp with only the central object, a file
with info on pixels that were changed and backgroound.
"""
params = Main_param(args)
for i, filt in enumerate(params.filters):
print "Running filter", filt
if os.path.isdir(params.path + 'stamp_stats') is False:
subprocess.call(["mkdir", params.path + 'stamp_stats'])
# open image and seg map
catalog = Table.read(params.cat_files[filt], format="ascii.basic")
hlr = catalog['A_IMAGE'][int(params.num)]
hdu1 = pyfits.open(params.gal_files[filt])
hdu2 = pyfits.open(params.seg_files[filt])
im_dat = hdu1[0].data
im_hdr = hdu1[0].header
seg_dat = hdu2[0].data
hdu1.close()
hdu2.close()
shape = im_dat.shape
x0, y0 = shape[0] / 2, shape[1] / 2
# classify pixels as belonging to image, other objects and background
# using segmentation map
im, bl, oth, oth_segs, check = div_pixels(seg_dat, params.num)
        # Some bright object is nearby and its seg map overlaps with the central object
# manually set output values so it fails selection tests later
if len(im) == 0:
print "Ignore object"
peak_val = 0
min_dist = 0.
avg_flux = 999.99
snr = -10.
info = [0, 0, 0, min_dist, avg_flux, peak_val, snr]
np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)
new_im_name = params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'
pyfits.writeto(new_im_name, im_dat, im_hdr, clobber=True)
continue
        # Object's seg map covers the entire pstamp, no blank region
# manually set output values so it fails selection tests later
if (len(bl) <= 1):
print "Ignore object"
peak_val = 0
min_dist = 0.
avg_flux = 999.99
snr = -10.
info = [0, 0, 0, min_dist, avg_flux, peak_val, snr]
np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)
new_im_name = params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'
pyfits.writeto(new_im_name, im_dat, im_hdr, clobber=True)
continue
peak_val = np.max([[im_dat[im[i][0]][im[i][1]]] for i in range(len(im))])
bck_pixels = [im_dat[bl[i][0], bl[i][1]] for i in range(len(bl))]
b_mean, b_std = get_stats(np.array(bck_pixels), str='Image Background')
# No other object present
if len(oth_segs) == 0:
print "No other object"
print len(bl)
min_dist = 999.99
pix_near_dist = [shape[0] / 2, shape[1] / 2]
avg_flux = 0.
snr = get_snr(im_dat, b_std**2, hlr)
info = [b_mean, b_std, np.sum(im_dat), min_dist, avg_flux, peak_val, snr]
print info
np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)
new_im_name = params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'
pyfits.writeto(new_im_name, im_dat, im_hdr, clobber=True)
continue
new_im = im_dat.copy()
min_dists = []
pix_min_dists = []
for oth_seg in oth_segs:
print "Other object detected with id ", oth_seg
print 'MASKING: ', len(oth[oth_seg]), ' pixels out of ', seg_dat.size
print " Noise file used ", params.noise_file
dist, pix = get_min_dist(x0, y0, np.array(oth[oth_seg]))
noise_file = params.noise_file[filt]
new_im = change_others(new_im, np.array(oth[oth_seg]), noise_file, b_std)
min_dists.append(dist)
pix_min_dists.append(pix)
min_dist = np.min(min_dists)
pix_near_dist = pix_min_dists[np.argmin(min_dists)]
avg_flux = get_avg_around_pix(pix_near_dist[0], pix_near_dist[1], im_dat)
snr = get_snr(new_im, b_std**2, hlr)
info = [b_mean, b_std, np.sum(im_dat), min_dist, avg_flux, peak_val, snr]
np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)
new_im_name = params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'
print 'CREATED NEW POSTAGE STAMP', new_im_name
pyfits.writeto(new_im_name, new_im, im_hdr, clobber=True)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--seg_id', default='1a',
help="Segment id of image to run [Default:1a]")
parser.add_argument('--num', default='0', type=str,
help="Identifier of galaxy to run [Default:0]")
parser.add_argument('--filter_names', default=['f606w', 'f814w'],
help="names of filters [Default: ['f814w','f606w']]")
parser.add_argument('--noise_file', type=str, default='acs_filter_unrot_sci_noise.fits',
help="File containing noise in each band, with band name \
replaced by'filter'[Default:'acs_filter_unrot_sci_noise.fits']]")
parser.add_argument('--cat_file', default='filter_clean.cat',
help="Name of saved catalog file, with band name \
replaced by'filter'[Default:'filter_clean.cat']")
parser.add_argument('--file_filter_name', default=['V', 'I'],
help="Name of filter to use ")
parser.add_argument('--main_path',
default='/nfs/slac/g/ki/ki19/deuce/AEGIS/AEGIS_catalog_full/',
help="Path where image files are stored \
[Default:'/nfs/slac/g/ki/ki19/deuce/AEGIS/AEGIS_catalog_full/'] ")
    parser.add_argument('--main_string', default='filter_segid_num_',
                        help="String of file name with 'filter', 'segid' and 'num' \
                        placeholders [Default:'filter_segid_num_']")
parser.add_argument('--image_string', default='image.fits',
help="String of saved galaxy image file [Default:'image.fits']")
parser.add_argument('--seg_string', default='seg.fits',
help="String of saved segmentation map file[Default:'seg.fits']")
parser.add_argument('--pixel_scale', default='0.03',
help="Pixel scale of galaxy image[Default:'0.03' #arcsec/pixel]")
args = parser.parse_args()
clean_pstamp(args)
|
3ae0a9c81d9544017696839e48329889a285259d
|
085cf6512c946d615eda58a3a0d353c0aa1db8cf
|
/deepfence_backend/api/license_api.py
|
4d4a63386098ed2be4c817509ddb54d819cc8c46
|
[
"Apache-2.0"
] |
permissive
|
deepfence/ThreatMapper
|
00c38c65ed2f014004c9818f03d5e129496b4dd8
|
748b0c8782507eaf351625b9c9fad46903ad6237
|
refs/heads/main
| 2023-08-31T11:13:53.813651
| 2023-03-02T00:49:57
| 2023-03-02T00:49:57
| 238,662,977
| 4,540
| 481
|
Apache-2.0
| 2023-09-14T13:24:37
| 2020-02-06T10:30:09
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
license_api.py
|
from flask import Blueprint
from utils.common import get_eula_text
from utils.response import set_response
license_api = Blueprint("license_api", __name__)
@license_api.route("/eula", methods=["GET"])
def eula():
"""
Eula
Permission: ALL
---
tags:
- LICENSE API
responses:
200:
description: Request successful.
"""
eula_text = get_eula_text()
return set_response(data=eula_text)
|
a98d0d1130bcc15b652e5bfd73957c4a7b6b46e8
|
0ff5c3178e87a28a82165bfa908d9319cf7a2323
|
/04. Arrays/ArrayPractice.py
|
007afc81752e2338ec5d1764415a64e6876205b1
|
[
"MIT"
] |
permissive
|
SR-Sunny-Raj/Hacktoberfest2021-DSA
|
40bf8385ed976fd81d27340514579b283c339c1f
|
116526c093ed1ac7907483d001859df63c902cb3
|
refs/heads/master
| 2023-01-31T07:46:26.016367
| 2023-01-26T12:45:32
| 2023-01-26T12:45:32
| 262,236,251
| 261
| 963
|
MIT
| 2023-03-25T14:22:10
| 2020-05-08T05:36:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
ArrayPractice.py
|
from array import *
# 1. Create an array and traverse.
my_array = array('i', [1, 2, 3, 4, 5])
for i in my_array:
print(i)
# 2. Access individual elements through indexes
print("Step 2")
print(my_array[3])
# 3. Append any value to the array using append() method
print("Step 3")
my_array.append(6)
print(my_array)
# 4. Insert value in an array using insert() method
print("Step 4")
my_array.insert(3, 11)
print(my_array)
# 5. Extend python array using extend() method
print("Step 5")
my_array1 = array('i', [10, 11, 12])
my_array.extend(my_array1)
print(my_array)
# 6. Add items from list into array using fromlist() method
print("Step 6")
tempList = [20, 21, 22]
my_array.fromlist(tempList)
print(my_array)
# 7. Remove any array element using remove() method
print("Step 7")
my_array.remove(11)
print(my_array)
# 8. Remove last array element using pop() method
print("Step 8")
my_array.pop()
print(my_array)
# 9. Fetch any element through its index using index() method
print("Step 9")
print(my_array.index(21))
# 10. Reverse a python array using reverse() method
print("Step 10")
my_array.reverse()
print(my_array)
# 11. Get array buffer information through buffer_info() method
print("Step 11")
print(my_array.buffer_info())
# 12. Check for number of occurrences of an element using count() method
print("Step 12")
my_array.append(11)
print(my_array.count(11))
print(my_array)
# 13. Convert array to string using tostring() method
print("Step 13")
strTemp = my_array.tostring()
print(strTemp)
ints = array('i')
ints.fromstring(strTemp)
print(ints)
# 14. Convert array to a python list with same elements using tolist() method
print("Step 14")
# print(my_array.tolist())
# 15. Append a string to char array using fromstring() method
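# A minimal completion of step 15, which the original file left unimplemented
# (note: fromstring()/tostring() are deprecated aliases of frombytes()/tobytes())
print("Step 15")
char_array = array('b', b'hi')
char_array.fromstring(b'!!')
print(char_array)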
# 16. Slice Elements from an array
print("Step 16")
print(my_array[:])
|
23fffc3686be0dfed265369261d8f6ea53737872
|
476c989bfda64cbb23d36d7056aabe6474f49361
|
/glazier/lib/actions/base.py
|
0dd4d8aeaaec081bc6d1b8dc38826adf617dbfe4
|
[
"Apache-2.0"
] |
permissive
|
google/glazier
|
77f41697a449708490252a4831f7e87a3f4bb2bd
|
ec44cb163f54d1393b0a2c2730d5d0d9d0fc8515
|
refs/heads/master
| 2023-09-03T22:23:55.657479
| 2023-08-30T16:37:43
| 2023-08-30T16:38:16
| 79,817,356
| 1,311
| 116
|
Apache-2.0
| 2023-09-12T20:14:24
| 2017-01-23T15:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,523
|
py
|
base.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic imaging action class."""
import logging
from typing import Optional
from glazier.lib import errors
class ActionError(errors.GlazierError):
"""Failure completing requested action."""
def __init__(self, message: Optional[str] = None):
if message is None:
message = 'Error encountered while executing action'
super().__init__(
error_code=errors.ErrorCode.ACTION_ERROR,
message=message)
class ValidationError(errors.GlazierError):
"""Failure validating a command type."""
def __init__(self, message: str):
super().__init__(
error_code=errors.ErrorCode.VALIDATION_ERROR,
message=message)
class BaseAction(object):
"""Generic action type."""
def __init__(self, args, build_info):
self._args = args
self._build_info = build_info
self._realtime = False
self._Setup()
def IsRealtime(self):
"""Run the action on discovery rather than queueing in the task list."""
return self._realtime
def Run(self):
"""Override this function to implement a new action."""
pass
def _Setup(self):
"""Override to customize action on initialization."""
pass
def Validate(self):
"""Override this function to implement validation of actions."""
logging.warning('Validation not implemented for action %s.',
self.__class__.__name__)
def _ListOfStringsValidator(self, args, length=1, max_length=None):
if not max_length:
max_length = length
self._TypeValidator(args, list)
if not length <= len(args) <= max_length:
raise ValidationError('Invalid args length: %s' % args)
for arg in args:
self._TypeValidator(arg, str)
def _TypeValidator(self, args, expect_types):
if not isinstance(args, expect_types):
raise ValidationError('Invalid type for arg %s. Found: %s, Expected: %s' %
(args, type(args), str(expect_types)))
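# A minimal hypothetical subclass sketch (illustrative only, not part of
# Glazier itself; assumes `import time` in the defining module):
#   class Sleep(BaseAction):
#       def Run(self):
#           time.sleep(int(self._args[0]))
#       def Validate(self):
#           self._ListOfStringsValidator(self._args)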
|
e163b8b452b12667c5d396008247f88478fb3284
|
0f11518fb0210b6f95ef1ab322396949dd46b574
|
/linkedin_api/client.py
|
188d563fb4c44cb6a3c28073fdb23dc877ce584d
|
[
"MIT"
] |
permissive
|
tomquirk/linkedin-api
|
e46d2d8c35dbc36e8c91968b12c34ff67b438fd7
|
5273fdfd4845b3eab588c08063a96f4910b560f5
|
refs/heads/master
| 2023-09-03T18:14:19.123363
| 2023-08-12T22:46:30
| 2023-08-12T22:46:30
| 137,201,683
| 1,310
| 369
|
MIT
| 2023-09-07T14:38:46
| 2018-06-13T10:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,653
|
py
|
client.py
|
import requests
import logging
from linkedin_api.cookie_repository import CookieRepository
from bs4 import BeautifulSoup
import json
logger = logging.getLogger(__name__)
class ChallengeException(Exception):
pass
class UnauthorizedException(Exception):
pass
class Client(object):
"""
Class to act as a client for the Linkedin API.
"""
# Settings for general Linkedin API calls
LINKEDIN_BASE_URL = "https://www.linkedin.com"
API_BASE_URL = f"{LINKEDIN_BASE_URL}/voyager/api"
REQUEST_HEADERS = {
"user-agent": " ".join(
[
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5)",
"AppleWebKit/537.36 (KHTML, like Gecko)",
"Chrome/83.0.4103.116 Safari/537.36",
]
),
# "accept": "application/vnd.linkedin.normalized+json+2.1",
"accept-language": "en-AU,en-GB;q=0.9,en-US;q=0.8,en;q=0.7",
"x-li-lang": "en_US",
"x-restli-protocol-version": "2.0.0",
# "x-li-track": '{"clientVersion":"1.2.6216","osName":"web","timezoneOffset":10,"deviceFormFactor":"DESKTOP","mpName":"voyager-web"}',
}
# Settings for authenticating with Linkedin
AUTH_REQUEST_HEADERS = {
"X-Li-User-Agent": "LIAuthLibrary:3.2.4 \
com.linkedin.LinkedIn:8.8.1 \
iPhone:8.3",
"User-Agent": "LinkedIn/8.8.1 CFNetwork/711.3.18 Darwin/14.0.0",
"X-User-Language": "en",
"X-User-Locale": "en_US",
"Accept-Language": "en-us",
}
def __init__(
self, *, debug=False, refresh_cookies=False, proxies={}, cookies_dir=None
):
self.session = requests.session()
self.session.proxies.update(proxies)
self.session.headers.update(Client.REQUEST_HEADERS)
self.proxies = proxies
self.logger = logger
self.metadata = {}
self._use_cookie_cache = not refresh_cookies
self._cookie_repository = CookieRepository(cookies_dir=cookies_dir)
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
def _request_session_cookies(self):
"""
Return a new set of session cookies as given by Linkedin.
"""
self.logger.debug("Requesting new cookies.")
res = requests.get(
f"{Client.LINKEDIN_BASE_URL}/uas/authenticate",
headers=Client.AUTH_REQUEST_HEADERS,
proxies=self.proxies,
)
return res.cookies
def _set_session_cookies(self, cookies):
"""
        Set the cookies of the current session and update the csrf-token header.
"""
self.session.cookies = cookies
self.session.headers["csrf-token"] = self.session.cookies["JSESSIONID"].strip(
'"'
)
@property
def cookies(self):
return self.session.cookies
def authenticate(self, username, password):
if self._use_cookie_cache:
self.logger.debug("Attempting to use cached cookies")
cookies = self._cookie_repository.get(username)
if cookies:
self.logger.debug("Using cached cookies")
self._set_session_cookies(cookies)
self._fetch_metadata()
return
self._do_authentication_request(username, password)
self._fetch_metadata()
def _fetch_metadata(self):
"""
Get metadata about the "instance" of the LinkedIn application for the signed in user.
Store this data in self.metadata
"""
res = requests.get(
f"{Client.LINKEDIN_BASE_URL}",
cookies=self.session.cookies,
headers=Client.AUTH_REQUEST_HEADERS,
proxies=self.proxies,
)
soup = BeautifulSoup(res.text, "lxml")
clientApplicationInstanceRaw = soup.find(
"meta", attrs={"name": "applicationInstance"}
)
if clientApplicationInstanceRaw:
clientApplicationInstanceRaw = (
clientApplicationInstanceRaw.attrs.get("content") or {}
)
clientApplicationInstance = json.loads(clientApplicationInstanceRaw)
self.metadata["clientApplicationInstance"] = clientApplicationInstance
clientPageInstanceIdRaw = soup.find(
"meta", attrs={"name": "clientPageInstanceId"}
)
if clientPageInstanceIdRaw:
clientPageInstanceId = clientPageInstanceIdRaw.attrs.get("content") or {}
self.metadata["clientPageInstanceId"] = clientPageInstanceId
def _do_authentication_request(self, username, password):
"""
Authenticate with Linkedin.
Return a session object that is authenticated.
"""
self._set_session_cookies(self._request_session_cookies())
payload = {
"session_key": username,
"session_password": password,
"JSESSIONID": self.session.cookies["JSESSIONID"],
}
res = requests.post(
f"{Client.LINKEDIN_BASE_URL}/uas/authenticate",
data=payload,
cookies=self.session.cookies,
headers=Client.AUTH_REQUEST_HEADERS,
proxies=self.proxies,
)
data = res.json()
if data and data["login_result"] != "PASS":
raise ChallengeException(data["login_result"])
if res.status_code == 401:
raise UnauthorizedException()
if res.status_code != 200:
raise Exception()
self._set_session_cookies(res.cookies)
self._cookie_repository.save(res.cookies, username)
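# Minimal usage sketch (the credentials below are placeholders):
#   client = Client(debug=True)
#   client.authenticate("user@example.com", "secret")
#   # on success the authenticated requests.Session is available as
#   # client.session, with the csrf-token header already set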
|
62102abdd9b3bff3d2243e05e880b62bd99dfb38
|
6c9e1a5139ca56b7a5df7d1e7cc7ce4f60e1c8af
|
/histomicstk/preprocessing/color_deconvolution/complement_stain_matrix.py
|
7e81f5ebba2dfc69240c071098aef9a7ed7cc946
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
DigitalSlideArchive/HistomicsTK
|
251b016066144fbad3efb2065993d7981265ab04
|
c03c852e72f1497d22535c6b7d5aba25c74e620d
|
refs/heads/master
| 2023-08-31T02:32:13.773082
| 2023-08-30T20:40:45
| 2023-08-30T20:40:45
| 44,324,447
| 351
| 125
|
Apache-2.0
| 2023-09-13T12:24:13
| 2015-10-15T14:49:21
|
Python
|
UTF-8
|
Python
| false
| false
| 844
|
py
|
complement_stain_matrix.py
|
import numpy as np
def complement_stain_matrix(w):
"""Generates a complemented stain matrix
Used to fill out empty columns of a stain matrix for use with
color_deconvolution. Replaces right-most column with normalized
cross-product of first two columns.
Parameters
----------
w : array_like
A 3x3 stain calibration matrix with stain color vectors in columns.
Returns
-------
w_comp : array_like
A 3x3 complemented stain calibration matrix with a third
orthogonal column.
See Also
--------
histomicstk.preprocessing.color_deconvolution.color_deconvolution
"""
stain0 = w[:, 0]
stain1 = w[:, 1]
stain2 = np.cross(stain0, stain1)
# Normalize new vector to have unit norm
return np.array([stain0, stain1, stain2 / np.linalg.norm(stain2)]).T
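# Illustrative usage sketch (the two stain vectors below are example
# hematoxylin/eosin-like values, not calibration data from this library):
#   import numpy as np
#   w = np.array([[0.65, 0.07, 0.0],
#                 [0.70, 0.99, 0.0],
#                 [0.29, 0.11, 0.0]])
#   w_comp = complement_stain_matrix(w)
#   # w_comp[:, 2] is the unit-norm cross product of the first two columns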
|
402d45efe1714d58b8bcef30e2bda37265b35567
|
1b32a80362ce9c2d8f0eb1948637c6599d85aa99
|
/tests/activations_test.py
|
d7dfe6ec439aff882e0d91d75c23572bd0fec2bd
|
[
"MIT"
] |
permissive
|
szymonmaszke/torchlayers
|
4492c628a49f4db30a76a17b5d38591a85109964
|
1eff7c55fdb3733e0acc180be79354ed35e4167c
|
refs/heads/master
| 2022-07-06T18:02:48.567112
| 2021-05-25T13:58:50
| 2022-06-13T19:09:28
| 201,987,932
| 599
| 47
|
MIT
| 2022-06-13T19:09:29
| 2019-08-12T18:35:56
|
Python
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
activations_test.py
|
import torch
import pytest
import torchlayers as tl
@pytest.mark.parametrize("klass", ("Swish", "HardSwish", "HardSigmoid"))
def test_object(klass):
getattr(tl, klass)()(torch.randn(4, 5, 6))
@pytest.mark.parametrize("function", ("swish", "hard_swish", "hard_sigmoid"))
def test_functional(function):
getattr(tl, function)(torch.randn(4, 5, 6))
|
f70fbf384bfe7aed1698742cef7b95d9722f6f4a
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/enum_type_wrapper.pyi
|
174002d7a414ff7ebd1a62f203098e29ac329ca3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 357
|
pyi
|
enum_type_wrapper.pyi
|
from typing import Any, List, Tuple
class EnumTypeWrapper(object):
def __init__(self, enum_type: Any) -> None: ...
def Name(self, number: int) -> bytes: ...
def Value(self, name: bytes) -> int: ...
def keys(self) -> List[bytes]: ...
def values(self) -> List[int]: ...
@classmethod
def items(cls) -> List[Tuple[bytes, int]]: ...
|
815f11839860ee0dc44d941354b4e92278f66312
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/event_boards/event_boards_vehicles_overlay.py
|
121d937bf4ccc86065e5a30001d650016bdc3ca7
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,391
|
py
|
event_boards_vehicles_overlay.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/event_boards/event_boards_vehicles_overlay.py
from gui.Scaleform.daapi.view.lobby.event_boards.event_boards_vos import makeFiltersVO, makeVehicleVO
from gui.Scaleform.daapi.view.lobby.event_boards.event_helpers import LEVELS_RANGE
from gui.Scaleform.daapi.view.meta.EventBoardsVehiclesOverlayMeta import EventBoardsVehiclesOverlayMeta
from gui.Scaleform.locale.EVENT_BOARDS import EVENT_BOARDS
from gui.event_boards.event_boards_items import EVENT_TYPE
from gui.shared.formatters.vehicle_filters import packVehicleTypesFilter, packVehicleLevelsFilter, packNationsFilter
from gui.shared.utils.requesters import REQ_CRITERIA
from helpers import int2roman, dependency
from helpers.i18n import makeString as _ms
from skeletons.gui.shared import IItemsCache
class EventBoardsVehiclesOverlay(EventBoardsVehiclesOverlayMeta):
itemsCache = dependency.descriptor(IItemsCache)
__lid = None
__opener = None
__filters = {'nation': -1,
'vehicleType': 'none',
'isMain': False,
'level': -1,
'compatibleOnly': False}
def setOpener(self, view):
self.__opener = view
eventData = self.__opener.eventData
if eventData.getType() == EVENT_TYPE.VEHICLE:
filtersVO = self.__filters.copy()
filtersVO['vehicleTypesDP'] = packVehicleTypesFilter(defaultVehType='none')
filtersVO['levelsDP'] = packVehicleLevelsFilter(LEVELS_RANGE)
filtersVO['nationDP'] = packNationsFilter()
self.as_setFiltersS(filtersVO)
self.applyFilters(**self.__filters)
else:
leaderboards = eventData.getLeaderboards()
leaderboardID = leaderboards[0][0]
header = {'filters': makeFiltersVO(eventData.getType(), leaderboards, leaderboardID, category='vehicles')}
self.as_setHeaderS(header)
self.changeFilter(leaderboardID)
def changeFilter(self, lid):
self.__lid = int(lid)
self._setData()
def applyFilters(self, nation, vehicleType, level, isMain, compatibleOnly):
self.__filters = {'nation': nation,
'vehicleType': vehicleType,
'isMain': isMain,
'level': level,
'compatibleOnly': compatibleOnly}
self._setData()
def _setData(self):
eventData = self.__opener.eventData
eventType = eventData.getType()
criteria = REQ_CRITERIA.EMPTY
if eventType == EVENT_TYPE.VEHICLE:
vehicleIds = [ veh for _, veh in eventData.getLeaderboards() ]
title = _ms(EVENT_BOARDS.VEHICLES_VEHICLE)
bgPath = None
if self.__filters['nation'] != -1:
criteria |= REQ_CRITERIA.NATIONS([self.__filters['nation']])
if self.__filters['vehicleType'] != 'none':
criteria |= REQ_CRITERIA.VEHICLE.CLASSES([self.__filters['vehicleType']])
if self.__filters['isMain']:
criteria |= REQ_CRITERIA.VEHICLE.FAVORITE
if self.__filters['level'] != -1:
criteria |= REQ_CRITERIA.VEHICLE.LEVELS([self.__filters['level']])
else:
vehicleIds = eventData.getLimits().getVehicles(self.__lid)
leaderboard = eventData.getLeaderboard(self.__lid)
if eventType == EVENT_TYPE.NATION:
title = _ms('#menu:nation_tree/title/{}'.format(leaderboard))
bgPath = '../maps/icons/eventBoards/flagsOverlay/{}.png'.format(leaderboard)
elif eventType == EVENT_TYPE.LEVEL:
title = _ms(EVENT_BOARDS.VEHICLES_LEVEL, level=int2roman(leaderboard))
bgPath = None
elif eventType == EVENT_TYPE.CLASS:
title = _ms('#quests:classes/{}'.format(leaderboard))
bgPath = None
else:
title = None
bgPath = None
allVehicles = self.itemsCache.items.getVehicles(REQ_CRITERIA.IN_CD_LIST(vehicleIds))
vehicles = allVehicles.filter(criteria).values()
vehicles.sort(key=lambda v: v.isInInventory, reverse=True)
vehiclesVO = [ makeVehicleVO(vehicle) for vehicle in vehicles ]
data = {'title': title,
'bgPath': bgPath,
'vehicles': vehiclesVO}
self.as_setVehiclesS(data)
return
|
27aa88ff150b6544be43326a0e3aeed001a34bc2
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Photos/PyObjCTest/test_phcloudidentifier.py
|
f5a1fcf03363419f3684b0624049117985239ac8
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 239
|
py
|
test_phcloudidentifier.py
|
from PyObjCTools.TestSupport import TestCase, min_os_level
import Photos
class TestPHCloudIdentifier(TestCase):
@min_os_level("10.13")
def testConstants(self):
self.assertIsInstance(Photos.PHLocalIdentifierNotFound, str)
|
983b207ecd1646edb8d4bdec8c7fdd16ee7f046b
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/amcrest/binary_sensor.py
|
e71a5cda538ba342cfbc5996ddbb9d85a2a2c67a
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 9,656
|
py
|
binary_sensor.py
|
"""Support for Amcrest IP camera binary sensors."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
import logging
from typing import TYPE_CHECKING
from amcrest import AmcrestError
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.const import CONF_BINARY_SENSORS, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle
from .const import (
BINARY_SENSOR_SCAN_INTERVAL_SECS,
DATA_AMCREST,
DEVICES,
SERVICE_EVENT,
SERVICE_UPDATE,
)
from .helpers import log_update_error, service_signal
if TYPE_CHECKING:
from . import AmcrestDevice
@dataclass
class AmcrestSensorEntityDescription(BinarySensorEntityDescription):
"""Describe Amcrest sensor entity."""
event_codes: set[str] | None = None
should_poll: bool = False
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=BINARY_SENSOR_SCAN_INTERVAL_SECS)
_ONLINE_SCAN_INTERVAL = timedelta(seconds=60 - BINARY_SENSOR_SCAN_INTERVAL_SECS)
_AUDIO_DETECTED_KEY = "audio_detected"
_AUDIO_DETECTED_POLLED_KEY = "audio_detected_polled"
_AUDIO_DETECTED_NAME = "Audio Detected"
_AUDIO_DETECTED_EVENT_CODES = {"AudioMutation", "AudioIntensity"}
_CROSSLINE_DETECTED_KEY = "crossline_detected"
_CROSSLINE_DETECTED_POLLED_KEY = "crossline_detected_polled"
_CROSSLINE_DETECTED_NAME = "CrossLine Detected"
_CROSSLINE_DETECTED_EVENT_CODE = "CrossLineDetection"
_MOTION_DETECTED_KEY = "motion_detected"
_MOTION_DETECTED_POLLED_KEY = "motion_detected_polled"
_MOTION_DETECTED_NAME = "Motion Detected"
_MOTION_DETECTED_EVENT_CODE = "VideoMotion"
_ONLINE_KEY = "online"
BINARY_SENSORS: tuple[AmcrestSensorEntityDescription, ...] = (
AmcrestSensorEntityDescription(
key=_AUDIO_DETECTED_KEY,
name=_AUDIO_DETECTED_NAME,
device_class=BinarySensorDeviceClass.SOUND,
event_codes=_AUDIO_DETECTED_EVENT_CODES,
),
AmcrestSensorEntityDescription(
key=_AUDIO_DETECTED_POLLED_KEY,
name=_AUDIO_DETECTED_NAME,
device_class=BinarySensorDeviceClass.SOUND,
event_codes=_AUDIO_DETECTED_EVENT_CODES,
should_poll=True,
),
AmcrestSensorEntityDescription(
key=_CROSSLINE_DETECTED_KEY,
name=_CROSSLINE_DETECTED_NAME,
device_class=BinarySensorDeviceClass.MOTION,
event_codes={_CROSSLINE_DETECTED_EVENT_CODE},
),
AmcrestSensorEntityDescription(
key=_CROSSLINE_DETECTED_POLLED_KEY,
name=_CROSSLINE_DETECTED_NAME,
device_class=BinarySensorDeviceClass.MOTION,
event_codes={_CROSSLINE_DETECTED_EVENT_CODE},
should_poll=True,
),
AmcrestSensorEntityDescription(
key=_MOTION_DETECTED_KEY,
name=_MOTION_DETECTED_NAME,
device_class=BinarySensorDeviceClass.MOTION,
event_codes={_MOTION_DETECTED_EVENT_CODE},
),
AmcrestSensorEntityDescription(
key=_MOTION_DETECTED_POLLED_KEY,
name=_MOTION_DETECTED_NAME,
device_class=BinarySensorDeviceClass.MOTION,
event_codes={_MOTION_DETECTED_EVENT_CODE},
should_poll=True,
),
AmcrestSensorEntityDescription(
key=_ONLINE_KEY,
name="Online",
device_class=BinarySensorDeviceClass.CONNECTIVITY,
should_poll=True,
),
)
BINARY_SENSOR_KEYS = [description.key for description in BINARY_SENSORS]
_EXCLUSIVE_OPTIONS = [
{_AUDIO_DETECTED_KEY, _AUDIO_DETECTED_POLLED_KEY},
{_MOTION_DETECTED_KEY, _MOTION_DETECTED_POLLED_KEY},
{_CROSSLINE_DETECTED_KEY, _CROSSLINE_DETECTED_POLLED_KEY},
]
_UPDATE_MSG = "Updating %s binary sensor"
def check_binary_sensors(value: list[str]) -> list[str]:
"""Validate binary sensor configurations."""
for exclusive_options in _EXCLUSIVE_OPTIONS:
if len(set(value) & exclusive_options) > 1:
raise vol.Invalid(
f"must contain at most one of {', '.join(exclusive_options)}."
)
return value
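# For example, configuring both "motion_detected" and "motion_detected_polled"
# fails validation, since the event-driven and polled variants of a sensor are
# mutually exclusive.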
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up a binary sensor for an Amcrest IP Camera."""
if discovery_info is None:
return
name = discovery_info[CONF_NAME]
device = hass.data[DATA_AMCREST][DEVICES][name]
binary_sensors = discovery_info[CONF_BINARY_SENSORS]
async_add_entities(
[
AmcrestBinarySensor(name, device, entity_description)
for entity_description in BINARY_SENSORS
if entity_description.key in binary_sensors
],
True,
)
class AmcrestBinarySensor(BinarySensorEntity):
"""Binary sensor for Amcrest camera."""
def __init__(
self,
name: str,
device: AmcrestDevice,
entity_description: AmcrestSensorEntityDescription,
) -> None:
"""Initialize entity."""
self._signal_name = name
self._api = device.api
self._channel = device.channel
self.entity_description: AmcrestSensorEntityDescription = entity_description
self._attr_name = f"{name} {entity_description.name}"
self._attr_should_poll = entity_description.should_poll
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self.entity_description.key == _ONLINE_KEY or self._api.available
async def async_update(self) -> None:
"""Update entity."""
if self.entity_description.key == _ONLINE_KEY:
await self._async_update_online()
else:
await self._async_update_others()
@Throttle(_ONLINE_SCAN_INTERVAL)
async def _async_update_online(self) -> None:
if not (self._api.available or self.is_on):
return
_LOGGER.debug(_UPDATE_MSG, self.name)
if self._api.available:
# Send a command to the camera to test if we can still communicate with it.
# Override of Http.async_command() in __init__.py will set self._api.available
# accordingly.
with suppress(AmcrestError):
await self._api.async_current_time
await self._async_update_unique_id()
self._attr_is_on = self._api.available
async def _async_update_others(self) -> None:
if not self.available:
return
_LOGGER.debug(_UPDATE_MSG, self.name)
try:
await self._async_update_unique_id()
except AmcrestError as error:
log_update_error(_LOGGER, "update", self.name, "binary sensor", error)
return
if not (event_codes := self.entity_description.event_codes):
raise ValueError(f"Binary sensor {self.name} event codes not set")
try:
for event_code in event_codes:
if await self._api.async_event_channels_happened(event_code):
self._attr_is_on = True
break
else:
self._attr_is_on = False
except AmcrestError as error:
log_update_error(_LOGGER, "update", self.name, "binary sensor", error)
return
async def _async_update_unique_id(self) -> None:
"""Set the unique id."""
if self._attr_unique_id is None and (
serial_number := await self._api.async_serial_number
):
self._attr_unique_id = (
f"{serial_number}-{self.entity_description.key}-{self._channel}"
)
@callback
def async_on_demand_update_online(self) -> None:
"""Update state."""
_LOGGER.debug(_UPDATE_MSG, self.name)
self._attr_is_on = self._api.available
self.async_write_ha_state()
@callback
def async_event_received(self, state: bool) -> None:
"""Update state from received event."""
_LOGGER.debug(_UPDATE_MSG, self.name)
self._attr_is_on = state
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Subscribe to signals."""
if self.entity_description.key == _ONLINE_KEY:
self.async_on_remove(
async_dispatcher_connect(
self.hass,
service_signal(SERVICE_UPDATE, self._signal_name),
self.async_on_demand_update_online,
)
)
else:
self.async_on_remove(
async_dispatcher_connect(
self.hass,
service_signal(SERVICE_UPDATE, self._signal_name),
self.async_write_ha_state,
)
)
if (
event_codes := self.entity_description.event_codes
) and not self.entity_description.should_poll:
for event_code in event_codes:
self.async_on_remove(
async_dispatcher_connect(
self.hass,
service_signal(
SERVICE_EVENT,
self._signal_name,
event_code,
),
self.async_event_received,
)
)
|
e5f879e0cbd792430e5b4b5ee1419b33dfb08ba5
|
41e1fe34d19118e85640f4174828bfc1aaf2f060
|
/examples/datavault2-bigdata-example/dags/acme/hooks/file_hook.py
|
6e1d00c7a236bddf45f499fba3241fd28b724b87
|
[] |
no_license
|
gtoonstra/etl-with-airflow
|
1da72e03ba581190790df6db0d85ec989e9e8070
|
1e069daf9db13de6850c9b25d673742cbcc5c207
|
refs/heads/master
| 2022-09-08T07:48:12.528587
| 2021-11-27T10:35:15
| 2021-11-27T10:35:15
| 71,038,701
| 1,270
| 294
| null | 2022-08-11T21:15:35
| 2016-10-16T08:31:14
|
Shell
|
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
file_hook.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import errno
import os
import shutil
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
class FileHook(BaseHook):
""" Hook to interact with file system
"""
def __init__(self,
file_conn_id="file_default"):
conn = self.get_connection(file_conn_id)
self.path = conn.extra_dejson['path']
def transfer_file(self, source_file, target_file):
target_file = os.path.join(self.path, target_file)
dirname = os.path.dirname(target_file)
try:
os.makedirs(dirname)
except OSError as e:
            if e.errno != errno.EEXIST:
raise
shutil.copyfile(source_file, target_file)
def complete_file_path(self, relative_path):
target_file = os.path.join(self.path, relative_path)
return target_file
|
0aca0497ce4f1b9e4ca83fb86876ef29cc963f7f
|
6bbbb8237c93f9b1f302010a65d6ecb6f286f23b
|
/websauna/tests/crud/test_autoform.py
|
3086f5b214091ebc777e247c23cdd7563f1d7d9c
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
websauna/websauna
|
ea60d5a6aef59b3725bace0d188dacea72574e63
|
a57de54fb8a3fae859f24f373f0292e1e4b3c344
|
refs/heads/master
| 2023-08-07T08:16:51.664340
| 2020-06-06T19:28:18
| 2020-06-06T19:28:18
| 49,773,298
| 294
| 74
|
NOASSERTION
| 2020-12-30T01:48:27
| 2016-01-16T12:55:54
|
Python
|
UTF-8
|
Python
| false
| false
| 8,695
|
py
|
test_autoform.py
|
"""Test form autogeneration and CRUD."""
# Standard Library
import time
# Pyramid
import transaction
import pytest
from splinter.driver import DriverAPI
# Websauna
import websauna
from websauna.tests.test_utils import create_logged_in_user
from websauna.tests.webserver import customized_web_server
from websauna.utils.slug import slug_to_uuid
from websauna.utils.slug import uuid_to_slug
@pytest.fixture(scope="module")
def tutorial_app(request, paster_config):
"""Custom WSGI app with travesal points for sitemap enabled."""
class Initializer(websauna.system.DemoInitializer):
def run(self):
super(Initializer, self).run()
from websauna.tests.crud import tutorial
self.config.scan(tutorial)
global_config, app_settings = paster_config
init = Initializer(global_config, app_settings)
init.run()
app = init.make_wsgi_app()
app.init = init
return app
@pytest.fixture(scope="module")
def web_server(request, tutorial_app):
"""Run a web server with tutorial installed."""
web_server = customized_web_server(request, tutorial_app)
return web_server()
@pytest.fixture(scope="module")
def registry(request, tutorial_app):
"""Run a web server with tutorial installed."""
return tutorial_app.init.config.registry
@pytest.mark.skip("Mark skipped for now before issues resolved on TravisCI - something to do with delays and browsers")
def test_add_question(browser: DriverAPI, registry, web_server, dbsession):
"""Adding questions should be succesful."""
b = browser
if b.driver.capabilities["browserName"] != "firefox":
# Fails at click and JavaScript modals for Chrome
pytest.skip("This test works only under Firefox WebDriver")
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit(web_server)
b.find_by_css("#nav-admin").click()
b.find_by_css("#btn-panel-add-question").click()
b.fill("question_text", "What is love")
b.find_by_css("#deformField2-date").click()
# Pick any date
b.find_by_css(".picker__day--infocus")[0].click()
time.sleep(0.8) # Give some time for the browser, next click fails on CI
b.find_by_css("#deformField2-time").click()
b.find_by_css(".picker__list-item")[0].click()
time.sleep(0.5) # Give some time for the browser, next click fails on CI
b.find_by_name("add").click()
assert b.is_element_present_by_css("#msg-item-added")
def test_add_choice_no_question(browser: DriverAPI, registry, web_server, dbsession):
"""Add one choice, no questions available."""
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit(web_server)
b.find_by_css("#nav-admin").click()
b.find_by_css("#btn-panel-add-choice").click()
b.fill("choice_text", "Baby don't hurt me")
b.find_by_name("add").click()
assert b.is_element_present_by_css("#msg-item-added")
def test_add_choice_question(browser: DriverAPI, registry, web_server, dbsession):
from .tutorial import Question
from .tutorial import Choice
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
question_uuid = uuid_to_slug(q.uuid)
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit(web_server)
b.find_by_css("#nav-admin").click()
b.find_by_css("#btn-panel-add-choice").click()
b.fill("choice_text", "Baby don't hurt me")
b.select("question", question_uuid)
b.find_by_name("add").click()
assert b.is_element_present_by_css("#msg-item-added")
with transaction.manager:
assert dbsession.query(Choice).first().question is not None
def test_add_choice_choose_no_question(browser: DriverAPI, registry, web_server, dbsession):
from .tutorial import Question
from .tutorial import Choice
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit(web_server)
b.find_by_css("#nav-admin").click()
b.find_by_css("#btn-panel-add-choice").click()
b.fill("choice_text", "Baby don't hurt me")
b.find_by_name("add").click()
assert b.is_element_present_by_css("#msg-item-added")
with transaction.manager:
assert dbsession.query(Choice).first().question is None
def test_edit_choice_question(browser: DriverAPI, registry, web_server, dbsession):
"""Change choice's assigned question in edit."""
from .tutorial import Question
from .tutorial import Choice
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
q2 = Question(question_text="Who shot JFK")
dbsession.add(q2)
dbsession.flush()
q2_slug = uuid_to_slug(q2.uuid)
c = Choice(choice_text="Foobar", question=q)
dbsession.add(c)
dbsession.flush()
c_slug = uuid_to_slug(c.uuid)
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit("{}/admin/models/choice/{}/edit".format(web_server, c_slug))
b.select("question", q2_slug)
b.find_by_name("save").click()
assert b.is_element_present_by_css("#msg-changes-saved")
with transaction.manager:
c = dbsession.query(Choice).get(1)
assert c.question.uuid == slug_to_uuid(q2_slug)
def test_edit_choice_remove_question(browser: DriverAPI, registry, web_server, dbsession):
"""Editing choice allows us to reset question value back to null."""
from .tutorial import Question
from .tutorial import Choice
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
c = Choice(choice_text="Foobar", question=q)
dbsession.add(c)
dbsession.flush()
c_slug = uuid_to_slug(c.uuid)
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit("{}/admin/models/choice/{}/edit".format(web_server, c_slug))
b.select("question", "")
b.find_by_name("save").click()
assert b.is_element_present_by_css("#msg-changes-saved")
with transaction.manager:
c = dbsession.query(Choice).get(1)
assert c.question is None
def test_question_shows_choices(browser: DriverAPI, registry, web_server, dbsession):
"""If question has active choices they are shown on Show screen, albeit not editable."""
from .tutorial import Question
from .tutorial import Choice
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
q_slug = uuid_to_slug(q.uuid)
c = Choice(choice_text="Baby don't hurt me", question=q)
dbsession.add(c)
dbsession.flush()
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit("{}/admin/models/question/{}/show".format(web_server, q_slug))
assert b.is_text_present("Baby don't hurt me")
def test_question_listing(browser: DriverAPI, registry, web_server, dbsession):
"""Question listing shows question text."""
from .tutorial import Question
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit("{}/admin/models/question/listing".format(web_server))
assert b.is_text_present("What is love")
def test_question_delete(browser: DriverAPI, registry, web_server, dbsession):
"""Delete question and make sure it deletes related choices.."""
from .tutorial import Question
from .tutorial import Choice
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
c = Choice(choice_text="Baby don't hurt me", question=q)
dbsession.add(c)
dbsession.flush()
q_slug = uuid_to_slug(q.uuid)
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit("{}/admin/models/question/{}".format(web_server, q_slug))
b.find_by_css("#btn-crud-delete").click()
b.find_by_css("#btn-delete-yes").click()
with transaction.manager:
assert dbsession.query(Question).count() == 0
assert dbsession.query(Choice).count() == 0
|
e0501edd6f5df994fb67f8e616a43f1ee677020a
|
02bde3948681b41ab5b94553cc87ca2bfd38f159
|
/src/embedding/faces/download_img_use_gevent.py
|
2ca56272d73a8520684c838e7b3e99b7ddca9cd4
|
[
"MIT"
] |
permissive
|
SharpAI/DeepCamera
|
84829f615a9fe6e3e3d67cb96b3143304ecbad59
|
6375e7c7cfda90345e9182214e9208a42b5254a7
|
refs/heads/master
| 2023-06-18T05:07:19.163001
| 2023-01-18T18:03:19
| 2023-01-18T18:03:19
| 173,961,960
| 1,548
| 244
|
MIT
| 2022-10-07T18:23:20
| 2019-03-05T14:29:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
download_img_use_gevent.py
|
# coding=utf-8
# Batch-download the URLs stored in sqlite using gevent coroutines
from gevent import monkey; monkey.patch_all()
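# patch_all() replaces blocking stdlib I/O (sockets, urllib2, ...) with
# cooperative versions so the greenlets spawned below can download concurrently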
import gevent
import urllib2
import os
import Image
from models import TrainSet
img_dir = 'face_dataset'
# dataset = TrainSet.query.all()  # query all rows, returns a list
dataset_is = TrainSet.query.filter_by(is_or_isnot=True).all()
# dataset_isnot = TrainSet.query.filter_by(is_or_isnot=False).all()
BASE_FOLDER = os.path.join(os.path.abspath(os.getenv('RUNTIME_BASEDIR',os.path.dirname(__file__))), img_dir)
if not os.path.exists(BASE_FOLDER):
os.makedirs(BASE_FOLDER)
def download_img(url, device_id, face_id, id):
print('GET: %s' % url)
resp_data = urllib2.urlopen(url).read()
print('%d bytes received from %s.' % (len(resp_data), url))
folder_name = '{}'.format(face_id)
folder_path = os.path.join(BASE_FOLDER, folder_name)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
filename = str(id) + '_' + folder_name + '.png'
image_path = os.path.join(folder_path, filename)
if not os.path.isfile(image_path):
with open(image_path, 'wb') as f:
f.write(resp_data)
im = Image.open(image_path)
im.save(image_path.rsplit('.')[0] + '.jpg')
os.remove(image_path)
# return os.path.join(folder_name, filename)
if __name__ == '__main__':
gevent.joinall([
gevent.spawn(download_img, data.url, data.device_id, data.face_id, data.id)
for data in dataset_is
])
|
fc8d497d30b1ae9eba694d82e328b9ebcf6147ac
|
b964ac1b4c25dff0c45807f7132d64c941845d78
|
/lyrebird/examples/checkers/img_size.py
|
190c355f5feb264b246935918737ebc43785e42b
|
[
"MIT"
] |
permissive
|
Meituan-Dianping/lyrebird
|
6db7fe3be32d74565bbcaa0491f03dc72d506214
|
b1ec5b081660c63e696454b63dd2f3c2b93a85d9
|
refs/heads/master
| 2023-08-25T14:44:01.580972
| 2023-08-23T10:04:00
| 2023-08-23T10:04:00
| 140,687,246
| 963
| 175
|
MIT
| 2023-09-11T09:10:58
| 2018-07-12T08:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
img_size.py
|
"""
checker example
steps:
- ignore unexpected objects
- prepare useful info
- check data
Info used in channel flow
├── name
└─┬ flow
├── size
├─┬ request
│ └── url
└─┬ response
└─┬ headers
└── Content-Type
"""
from lyrebird import event
from decimal import Decimal
from lyrebird.checker import ExtensionCategory
TITLE = '<示例脚本>获取图片大小'
CATEGORY = ExtensionCategory.CHECKER
# THRESHOLD_IMG_SIZE: image size limitation
THRESHOLD_IMG_SIZE = 500
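# A hypothetical flow message matching the structure in the module docstring
# (all values illustrative):
#   msg = {
#       'name': 'server.response',
#       'flow': {
#           'size': 1048576,
#           'request': {'url': 'https://example.com/img/banner.png'},
#           'response': {'headers': {'Content-Type': 'image/png'}},
#       },
#   }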
@event('flow')
def img_size(msg):
    # 1.ignore unexpected objects
if ignore_check(msg):
return
# 2.prepare useful info
img_size = int(msg['flow']['size'])
img_size = Decimal(img_size / 1024).quantize(Decimal('0.0'))
# 3.check data
if img_size > THRESHOLD_IMG_SIZE:
img_url = msg['flow']['request']['url']
img_url = img_url[img_url.rfind('//') + 2:]
title = f'Image size {img_size}KB is beyond expectations: {img_url}\n'
description = f'Image size {img_size}KB is beyond expectations: {img_url}\n'
        description += f'Expected: {THRESHOLD_IMG_SIZE}KB\n'
description += f'Actual: {img_size}KB\n'
event.issue(title, description)
def ignore_check(msg):
if msg['name'] != 'server.response':
return True
if 'response' not in msg['flow']:
return True
if 'image' not in msg['flow']['response']['headers']['Content-Type']:
return True
return False
|
4485dadb926fce298a331eb8b279b944aef167d0
|
8cc3498e311d15c9a4394aaa341ef489b482dbe6
|
/test/language/templates/python/InstantiateTypeAsChoiceFieldTest.py
|
8b78451a8f8f7c243ba615d31d5815bd34367abe
|
[
"BSD-3-Clause"
] |
permissive
|
ndsev/zserio
|
3e55c064f72e86219a6da297f116d3dbb565a9a9
|
c540c4a97fee4e08bfc6669a2cec0d2b8282d8f6
|
refs/heads/master
| 2023-08-24T14:56:10.750155
| 2023-08-11T19:36:54
| 2023-08-11T19:36:54
| 141,550,444
| 113
| 23
|
BSD-3-Clause
| 2023-08-30T11:14:47
| 2018-07-19T08:44:23
|
Java
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
InstantiateTypeAsChoiceFieldTest.py
|
import unittest
import zserio
from testutils import getZserioApi
class InstantiateTypeAsChoiceFieldTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "templates.zs").instantiate_type_as_choice_field
def testReadWrite(self):
instantiateTypeAsChoiceField = self.api.InstantiateTypeAsChoiceField(True)
instantiateTypeAsChoiceField.test = self.api.Test32(13)
writer = zserio.BitStreamWriter()
instantiateTypeAsChoiceField.write(writer)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
readInstantiateTypeAsChoiceField = self.api.InstantiateTypeAsChoiceField(True)
readInstantiateTypeAsChoiceField.read(reader)
self.assertEqual(instantiateTypeAsChoiceField, readInstantiateTypeAsChoiceField)
|
9994ce7e36e3c6252f6ac9e53e8e804cefa892d2
|
b347bc4b850dee4a8a9a171b563a3f31230ce1c7
|
/sktime/transformations/series/detrend/tests/__init__.py
|
b523411f9beb03e097950808a5009030ff0ef689
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime
|
5963962df338c5931a2f9f1794d1203c50ddc27e
|
70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
|
refs/heads/main
| 2023-08-22T18:20:08.022950
| 2023-08-22T15:24:39
| 2023-08-22T15:24:39
| 156,401,841
| 1,117
| 268
|
BSD-3-Clause
| 2023-09-14T20:44:21
| 2018-11-06T15:08:24
|
Python
|
UTF-8
|
Python
| false
| false
| 126
|
py
|
__init__.py
|
"""Tests for detrenders."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["mloning"]
|
c378f7ccc6c25518ad4540087e83fe63bbcf9f29
|
4c800425b941243c521f0a878c1b12a8f5a50585
|
/examples/custom_parameterized_image_label_loss.py
|
c107adc216ef96fd0159a8dc502cafd1ea43f6d7
|
[
"Apache-2.0"
] |
permissive
|
DeepRegNet/DeepReg
|
f7af4554c89a7a40a53bac9f7fc9939402d1110d
|
650a2f1a88ad3c6932be305d6a98a36e26feedc6
|
refs/heads/main
| 2023-04-06T20:40:38.722315
| 2022-05-18T21:52:19
| 2022-05-18T21:52:19
| 269,365,590
| 509
| 78
|
Apache-2.0
| 2023-03-11T12:18:21
| 2020-06-04T13:21:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
custom_parameterized_image_label_loss.py
|
"""This script provides an example of using custom backbone for training."""
import tensorflow as tf
from deepreg.registry import REGISTRY
from deepreg.train import train
@REGISTRY.register_loss(name="lp_norm")
class LPNorm(tf.keras.losses.Loss):
"""
L^p norm between y_true and y_pred, p = 1 or 2.
y_true and y_pred have to be at least 1d tensor, including batch axis.
"""
def __init__(
self,
p: int,
name: str = "LPNorm",
**kwargs,
):
"""
Init.
:param p: order of the norm, 1 or 2.
:param name: name of the loss.
:param kwargs: additional arguments.
"""
super().__init__(name=name, **kwargs)
if p not in [1, 2]:
            raise ValueError(f"For LPNorm, p must be 1 or 2, got {p}.")
self.p = p
self.flatten = tf.keras.layers.Flatten()
def call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
"""
Return loss for a batch.
:param y_true: shape = (batch, ...)
:param y_pred: shape = (batch, ...)
:return: shape = (batch,)
"""
diff = y_true - y_pred
diff = self.flatten(diff)
loss = tf.norm(diff, axis=-1, ord=self.p)
return loss
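# Quick sanity check of the registered loss (hypothetical tensors, not part of
# the original example): with p=2, each per-sample difference of nine ones has
# L2 norm sqrt(9) = 3.0.
#   y_true = tf.ones((2, 3, 3))
#   y_pred = tf.zeros((2, 3, 3))
#   per_sample = LPNorm(p=2).call(y_true, y_pred)  # -> [3.0, 3.0]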
config_path = "examples/config_custom_parameterized_image_label_loss.yaml"
train(
gpu="",
config_path=config_path,
gpu_allow_growth=True,
ckpt_path="",
)
|
965c51cd9072f73d79ebc91396ea0cf2d3089239
|
84724b34b3f1e84dc53cbca5f3660590dbc34a9f
|
/nova/tests/functional/regressions/test_bug_1908075.py
|
534163fb2bbf8e52c18170cde70591ed22193791
|
[
"Apache-2.0"
] |
permissive
|
openstack/nova
|
2c24b64e3677595611715bae6dda14edd3f90a24
|
065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
|
refs/heads/master
| 2023-08-28T15:10:05.126314
| 2023-08-25T20:31:27
| 2023-08-25T20:31:27
| 790,031
| 2,287
| 2,320
|
Apache-2.0
| 2023-07-08T02:10:29
| 2010-07-22T02:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,834
|
py
|
test_bug_1908075.py
|
# Copyright 2020, Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
class TestVolAttachCinderReset(integrated_helpers._IntegratedTestBase):
"""Regression test for bug 1908075.
    This regression test asserts that n-api does not allow a non-multiattach
    volume to be attached to multiple instances after an admin has forcibly
    reset the state of the volume in Cinder.
"""
microversion = 'latest'
def test_volume_attach_after_cinder_reset_state(self):
volume_id = self.cinder.IMAGE_BACKED_VOL
# Launch a server and attach a volume
server_a = self._create_server(networks='none')
self.api.post_server_volume(
server_a['id'],
{'volumeAttachment': {'volumeId': volume_id}}
)
# reset-state of the volume within the cinder fixture, we don't model
# the state of the volume within the fixture so this will have to do.
del self.cinder.volume_to_attachment[volume_id]
self.assertNotIn(
volume_id, self.cinder.volume_ids_for_instance(server_a['id']))
# Launch a second server and attempt to attach the same volume again
server_b = self._create_server(networks='none')
# Assert that attempting to attach this non multiattach volume to
# another instance is rejected by n-api
ex = self.assertRaises(
client.OpenStackApiException,
self.api.post_server_volume,
server_b['id'],
{'volumeAttachment': {'volumeId': volume_id}}
)
self.assertEqual(400, ex.response.status_code)
def test_volume_attach_after_cinder_reset_state_multiattach_volume(self):
volume_id = self.cinder.MULTIATTACH_VOL
# Launch a server and attach a volume
server_a = self._create_server(networks='none')
self.api.post_server_volume(
server_a['id'],
{'volumeAttachment': {'volumeId': volume_id}}
)
# reset-state of the volume within the cinder fixture, we don't model
# the state of the volume within the fixture so this will have to do.
del self.cinder.volume_to_attachment[volume_id]
self.assertNotIn(
volume_id, self.cinder.volume_ids_for_instance(server_a['id']))
# Launch a second server and attempt to attach the same volume again
server_b = self._create_server(networks='none')
# NOTE(lyarwood): Unlike non-multiattach volumes this should always be
# allowed as we can have multiple active bdms for multiattached volumes
self.api.post_server_volume(
server_b['id'],
{'volumeAttachment': {'volumeId': volume_id}}
)
# Assert that we have bdms within Nova still for this attachment
self.assertEqual(
volume_id,
self.api.get_server_volumes(server_a['id'])[0].get('volumeId'))
self.assertEqual(
volume_id,
self.api.get_server_volumes(server_b['id'])[0].get('volumeId'))
# Assert that the new attachment is the only one in the fixture
self.assertIn(
volume_id, self.cinder.volume_ids_for_instance(server_b['id']))
|
e6105d7e69ca18b35068036699173cf818fbbbd2
|
c5ddaaa915829e946762f83a610a3ef43ad1f190
|
/tests/data/src/pep518_with_extra_and_markers-1.0/setup.py
|
bfac5b46783d51451a3861b726fd93b4a77f4702
|
[
"MIT"
] |
permissive
|
pypa/pip
|
4cebef8b5e8f69921a4cbd37d5b2021fd3200a16
|
0778c1c153da7da457b56df55fb77cbba08dfb0c
|
refs/heads/main
| 2023-08-15T01:51:24.039937
| 2023-08-14T06:22:21
| 2023-08-14T06:22:21
| 1,446,467
| 8,612
| 3,321
|
MIT
| 2023-09-12T07:08:11
| 2011-03-06T14:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 283
|
py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
# ensure dependencies are installed
import simple
import simplewheel
assert simplewheel.__version__ == "2.0"
setup(
name="pep518_with_extra_and_markers",
version="1.0",
py_modules=["pep518_with_extra_and_markers"],
)
|
e4208a778c3057198833b0c87baab627ff5c981c
|
8adef741528ac0af5777fd80dbf37a5350da86b1
|
/demos/chat-server/apps/app/app.py
|
0a8281275709f6b4bf8ea5431e2ca9976d306924
|
[
"Apache-2.0"
] |
permissive
|
wecatch/app-turbo
|
5ff3a1bfa33febd9b24afa0a7e5b2ab71d00c138
|
809c6fdc54fac18441b1d7730ed2c7c75344d705
|
refs/heads/master
| 2022-11-23T12:32:56.660264
| 2022-11-19T03:56:46
| 2022-11-19T03:56:46
| 25,961,303
| 159
| 25
| null | 2017-01-15T06:00:20
| 2014-10-30T08:24:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
app.py
|
# -*- coding:utf-8 -*-
import os.path
import uuid
import tornado.escape
import tornado.web
from tornado import gen
import turbo.log
from turbo.flux import state as turbo_state
from . import base
BaseHandler = base.BaseHandler
logger = turbo.log.getLogger(__file__)
global_message_buffer = turbo_state.chat.message_buffer
class MainHandler(BaseHandler):
def get(self):
self.render("index.html", messages=global_message_buffer.cache)
class MessageNewHandler(BaseHandler):
def post(self):
message = {
"id": str(uuid.uuid4()),
"body": self.get_argument("body"),
}
# to_basestring is necessary for Python 3's json encoder,
# which doesn't accept byte strings.
message["html"] = tornado.escape.to_basestring(
self.render_string("message.html", message=message))
if self.get_argument("next", None):
self.redirect(self.get_argument("next"))
else:
self.write(message)
global_message_buffer.new_messages([message])
class MessageUpdatesHandler(BaseHandler):
@gen.coroutine
def post(self):
cursor = self.get_argument("cursor", None)
        # Save the future returned by wait_for_messages so we can cancel
        # it in on_connection_close.
self.future = global_message_buffer.wait_for_messages(cursor=cursor)
messages = yield self.future
if self.request.connection.stream.closed():
return
self.write(dict(messages=messages))
def on_connection_close(self):
global_message_buffer.cancel_wait(self.future)
|
1032b2868347ef20cebb4a239f8d62907bb97c06
|
483424524c70852cc043e0d77bf1b757a61d797a
|
/deepspeed/ops/lamb/fused_lamb.py
|
6ccd9d4c6b066601bb2f6e5d8d8def6be09fd22b
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/DeepSpeed
|
810f1af320020718d0794f5a97cde6f1d17af122
|
55d9964c59c0c6e23158b5789a5c36c28939a7b0
|
refs/heads/master
| 2023-09-06T07:40:52.145692
| 2023-09-05T23:51:23
| 2023-09-05T23:51:23
| 235,860,204
| 27,557
| 3,347
|
Apache-2.0
| 2023-09-14T21:38:46
| 2020-01-23T18:35:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,815
|
py
|
fused_lamb.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from NVIDIA/apex/optimizer/fused_adam and implements the LAMB optimizer
"""
import types
import torch
from deepspeed.ops.op_builder import FusedLambBuilder
class FusedLamb(torch.optim.Optimizer):
"""Implements the LAMB algorithm. Currently GPU-only.
    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`
    (https://arxiv.org/abs/1904.00962).
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
bias_correction (bool, optional): bias correction (default: True)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
max_grad_norm (float, optional): value used to clip global grad norm
(default: 0.0)
max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
amsgrad (boolean, optional): NOT SUPPORTED in FusedLamb!
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
max_coeff=10.0,
min_coeff=0.01,
amsgrad=False):
self.fused_lamb_cuda = FusedLambBuilder().load()
if amsgrad:
raise RuntimeError('FusedLamb does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
max_coeff=max_coeff,
min_coeff=min_coeff)
super(FusedLamb, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
self.lamb_coeffs = []
def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
            output_params (list of tensors, optional): A reduced precision copy
                of the updated weights written out in addition to the regular
                updated weights. Has to be of the same type as the gradients. (default: None)
            scale (float, optional): factor to divide gradient tensor values
                by before applying to weights. (default: 1)
            grad_norms (list of tensors, optional): precomputed gradient norms
                used for gradient clipping. (default: None)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameter means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
output_params_group = [None] * len(self.param_groups)
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0]) != list:
output_params_group = [output_params]
else:
output_params_group = output_params
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
#remove the previous coeffs
del self.lamb_coeffs[:]
for group, grads_this_group, output_params_this_group, grad_norm_group in zip(
self.param_groups, grads_group, output_params_group, grad_norms):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
if output_params_this_group is None:
output_params_this_group = [None] * len(group['params'])
if grad_norm_group is None:
grad_norm_group = [None] * len(group['params'])
elif not isinstance(grad_norm_group, list):
grad_norm_group = [grad_norm_group]
bias_correction = 1 if group['bias_correction'] else 0
for p, grad, output_param, grad_norm in zip(group['params'], grads_this_group, output_params_this_group,
grad_norm_group):
# compute combined scale factor for this group
combined_scale = scale
if group['max_grad_norm'] > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
if clip > 1:
combined_scale = clip * scale
#note: p.grad should not ever be set for correct operation of mixed precision optimizer that sometimes sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedLamb does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
max_coeff = group['max_coeff']
min_coeff = group['min_coeff']
state['step'] += 1
out_p = torch.tensor([], dtype=torch.float) if output_param is None else output_param
lamb_coeff = self.fused_lamb_cuda.lamb(p.data, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1,
beta2, max_coeff, min_coeff, group['eps'], combined_scale,
state['step'], self.eps_mode, bias_correction,
group['weight_decay'])
self.lamb_coeffs.append(lamb_coeff)
return loss
def get_lamb_coeffs(self):
lamb_coeffs = [lamb_coeff.item() for lamb_coeff in self.lamb_coeffs]
return lamb_coeffs
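# --- Illustrative usage sketch (not part of the original module) ---
# FusedLamb is a drop-in torch optimizer, but it needs a CUDA device and a
# buildable DeepSpeed FusedLamb kernel; the model below is a made-up example:
#
# model = torch.nn.Linear(1024, 1024).cuda()
# optimizer = FusedLamb(model.parameters(), lr=1e-3, weight_decay=0.01)
# loss = model(torch.randn(8, 1024, device='cuda')).sum()
# loss.backward()
# optimizer.step()
# print(optimizer.get_lamb_coeffs())  # per-parameter LAMB trust ratios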
|
f573cf2024d885c6aefeb435b4760b3e1d1f548f
|
86948ff50c9355062e55b1f6a41029685270b76c
|
/hyptorch/pmath.py
|
92e95a7db65c439dfb412b062cc1acbe94c224d0
|
[
"MIT"
] |
permissive
|
leymir/hyperbolic-image-embeddings
|
c56d0049a190d6e12f83de9e0c65742dd9badbdb
|
6633edbbeffd6d90271f0963852a046c64f407d6
|
refs/heads/master
| 2022-01-29T13:35:04.796969
| 2022-01-12T13:15:29
| 2022-01-12T13:15:29
| 178,893,711
| 233
| 42
|
MIT
| 2020-06-13T20:36:55
| 2019-04-01T15:34:48
|
Python
|
UTF-8
|
Python
| false
| false
| 13,616
|
py
|
pmath.py
|
"""
Implementation of various mathematical operations in the Poincare ball model of hyperbolic space. Some
functions are based on the implementation in https://github.com/geoopt/geoopt (copyright by Maxim Kochurov).
"""
import numpy as np
import torch
from scipy.special import gamma
def tanh(x, clamp=15):
return x.clamp(-clamp, clamp).tanh()
class Artanh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
x = x.clamp(-1 + 1e-5, 1 - 1e-5)
ctx.save_for_backward(x)
res = (torch.log_(1 + x).sub_(torch.log_(1 - x))).mul_(0.5)
return res
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
return grad_output / (1 - input ** 2)
class RiemannianGradient(torch.autograd.Function):
c = 1
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_output):
(x,) = ctx.saved_tensors
# x: B x d
scale = (1 - RiemannianGradient.c * x.pow(2).sum(-1, keepdim=True)).pow(2) / 4
return grad_output * scale
class Arsinh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return (x + torch.sqrt_(1 + x.pow(2))).clamp_min_(1e-5).log_()
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
return grad_output / (1 + input ** 2) ** 0.5
def artanh(x):
return Artanh.apply(x)
def arsinh(x):
return Arsinh.apply(x)
def arcosh(x, eps=1e-5):  # pragma: no cover
    # arcosh is only defined for x >= 1; the previous clamp into (-1, 1)
    # made sqrt(x - 1) NaN for every input.
    x = x.clamp(min=1 + eps)
    return torch.log(x + torch.sqrt(1 + x) * torch.sqrt(x - 1))
def project(x, *, c=1.0):
r"""
Safe projection on the manifold for numerical stability. This was mentioned in [1]_
Parameters
----------
x : tensor
point on the Poincare ball
c : float|tensor
ball negative curvature
Returns
-------
tensor
projected vector on the manifold
References
----------
.. [1] Hyperbolic Neural Networks, NIPS2018
https://arxiv.org/abs/1805.09112
"""
c = torch.as_tensor(c).type_as(x)
return _project(x, c)
def _project(x, c):
norm = torch.clamp_min(x.norm(dim=-1, keepdim=True, p=2), 1e-5)
maxnorm = (1 - 1e-3) / (c ** 0.5)
cond = norm > maxnorm
projected = x / norm * maxnorm
return torch.where(cond, projected, x)
def lambda_x(x, *, c=1.0, keepdim=False):
r"""
Compute the conformal factor :math:`\lambda^c_x` for a point on the ball
.. math::
        \lambda^c_x = \frac{2}{1 - c \|x\|_2^2}
Parameters
----------
x : tensor
point on the Poincare ball
c : float|tensor
ball negative curvature
keepdim : bool
retain the last dim? (default: false)
Returns
-------
tensor
conformal factor
"""
c = torch.as_tensor(c).type_as(x)
return _lambda_x(x, c, keepdim=keepdim)
def _lambda_x(x, c, keepdim: bool = False):
return 2 / (1 - c * x.pow(2).sum(-1, keepdim=keepdim))
def mobius_add(x, y, *, c=1.0):
r"""
Mobius addition is a special operation in a hyperbolic space.
.. math::
x \oplus_c y = \frac{
(1 + 2 c \langle x, y\rangle + c \|y\|^2_2) x + (1 - c \|x\|_2^2) y
}{
1 + 2 c \langle x, y\rangle + c^2 \|x\|^2_2 \|y\|^2_2
}
In general this operation is not commutative:
.. math::
x \oplus_c y \ne y \oplus_c x
But in some cases this property holds:
* zero vector case
.. math::
\mathbf{0} \oplus_c x = x \oplus_c \mathbf{0}
    * zero curvature case, which is the same as Euclidean addition
.. math::
x \oplus_0 y = y \oplus_0 x
    Another useful property is the so-called left-cancellation law:
.. math::
(-x) \oplus_c (x \oplus_c y) = y
Parameters
----------
x : tensor
point on the Poincare ball
y : tensor
point on the Poincare ball
c : float|tensor
ball negative curvature
Returns
-------
tensor
the result of mobius addition
"""
c = torch.as_tensor(c).type_as(x)
return _mobius_add(x, y, c)
def _mobius_add(x, y, c):
x2 = x.pow(2).sum(dim=-1, keepdim=True)
y2 = y.pow(2).sum(dim=-1, keepdim=True)
xy = (x * y).sum(dim=-1, keepdim=True)
num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y
denom = 1 + 2 * c * xy + c ** 2 * x2 * y2
return num / (denom + 1e-5)
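# Illustrative check of the left-cancellation law (a sketch, not part of the
# original module; points are projected into the unit ball with c=1.0):
#
# x = project(torch.randn(4, 2) * 0.1)
# y = project(torch.randn(4, 2) * 0.1)
# z = mobius_add(x, y)
# assert torch.allclose(mobius_add(-x, z), y, atol=1e-4)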
def dist(x, y, *, c=1.0, keepdim=False):
r"""
Distance on the Poincare ball
.. math::
d_c(x, y) = \frac{2}{\sqrt{c}}\tanh^{-1}(\sqrt{c}\|(-x)\oplus_c y\|_2)
.. plot:: plots/extended/poincare/distance.py
Parameters
----------
x : tensor
point on poincare ball
y : tensor
point on poincare ball
c : float|tensor
ball negative curvature
keepdim : bool
retain the last dim? (default: false)
Returns
-------
tensor
geodesic distance between :math:`x` and :math:`y`
"""
c = torch.as_tensor(c).type_as(x)
return _dist(x, y, c, keepdim=keepdim)
def _dist(x, y, c, keepdim: bool = False):
sqrt_c = c ** 0.5
dist_c = artanh(sqrt_c * _mobius_add(-x, y, c).norm(dim=-1, p=2, keepdim=keepdim))
return dist_c * 2 / sqrt_c
def dist0(x, *, c=1.0, keepdim=False):
r"""
Distance on the Poincare ball to zero
Parameters
----------
x : tensor
point on poincare ball
c : float|tensor
ball negative curvature
keepdim : bool
retain the last dim? (default: false)
Returns
-------
tensor
geodesic distance between :math:`x` and :math:`0`
"""
c = torch.as_tensor(c).type_as(x)
return _dist0(x, c, keepdim=keepdim)
def _dist0(x, c, keepdim: bool = False):
sqrt_c = c ** 0.5
dist_c = artanh(sqrt_c * x.norm(dim=-1, p=2, keepdim=keepdim))
return dist_c * 2 / sqrt_c
def expmap(x, u, *, c=1.0):
r"""
    Exponential map for the Poincare ball model. This is tightly related to :func:`geodesic`.
    Intuitively, the exponential map is smooth constant-speed travel from the starting point :math:`x` with velocity :math:`u`.
    A bit more formally, this is travel along the curve :math:`\gamma_{x, u}(t)` such that
.. math::
\gamma_{x, u}(0) = x\\
\dot\gamma_{x, u}(0) = u\\
\|\dot\gamma_{x, u}(t)\|_{\gamma_{x, u}(t)} = \|u\|_x
    The existence of this curve relies on the uniqueness of the differential equation solution, which is local.
    For the Poincare ball model the solution is well defined globally, and we have
.. math::
\operatorname{Exp}^c_x(u) = \gamma_{x, u}(1) = \\
x\oplus_c \tanh(\sqrt{c}/2 \|u\|_x) \frac{u}{\sqrt{c}\|u\|_2}
Parameters
----------
x : tensor
starting point on poincare ball
u : tensor
speed vector on poincare ball
c : float|tensor
ball negative curvature
Returns
-------
tensor
:math:`\gamma_{x, u}(1)` end point
"""
c = torch.as_tensor(c).type_as(x)
return _expmap(x, u, c)
def _expmap(x, u, c): # pragma: no cover
sqrt_c = c ** 0.5
u_norm = torch.clamp_min(u.norm(dim=-1, p=2, keepdim=True), 1e-5)
second_term = (
tanh(sqrt_c / 2 * _lambda_x(x, c, keepdim=True) * u_norm)
* u
/ (sqrt_c * u_norm)
)
gamma_1 = _mobius_add(x, second_term, c)
return gamma_1
def expmap0(u, *, c=1.0):
r"""
Exponential map for Poincare ball model from :math:`0`.
.. math::
\operatorname{Exp}^c_0(u) = \tanh(\sqrt{c}/2 \|u\|_2) \frac{u}{\sqrt{c}\|u\|_2}
Parameters
----------
u : tensor
speed vector on poincare ball
c : float|tensor
ball negative curvature
Returns
-------
tensor
:math:`\gamma_{0, u}(1)` end point
"""
c = torch.as_tensor(c).type_as(u)
return _expmap0(u, c)
def _expmap0(u, c):
sqrt_c = c ** 0.5
u_norm = torch.clamp_min(u.norm(dim=-1, p=2, keepdim=True), 1e-5)
gamma_1 = tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
return gamma_1
def logmap(x, y, *, c=1.0):
r"""
Logarithmic map for two points :math:`x` and :math:`y` on the manifold.
.. math::
\operatorname{Log}^c_x(y) = \frac{2}{\sqrt{c}\lambda_x^c} \tanh^{-1}(
\sqrt{c} \|(-x)\oplus_c y\|_2
) * \frac{(-x)\oplus_c y}{\|(-x)\oplus_c y\|_2}
The result of Logarithmic map is a vector such that
.. math::
y = \operatorname{Exp}^c_x(\operatorname{Log}^c_x(y))
Parameters
----------
x : tensor
starting point on poincare ball
y : tensor
target point on poincare ball
c : float|tensor
ball negative curvature
Returns
-------
tensor
tangent vector that transports :math:`x` to :math:`y`
"""
c = torch.as_tensor(c).type_as(x)
return _logmap(x, y, c)
def _logmap(x, y, c): # pragma: no cover
sub = _mobius_add(-x, y, c)
sub_norm = sub.norm(dim=-1, p=2, keepdim=True)
lam = _lambda_x(x, c, keepdim=True)
sqrt_c = c ** 0.5
return 2 / sqrt_c / lam * artanh(sqrt_c * sub_norm) * sub / sub_norm
def logmap0(y, *, c=1.0):
r"""
Logarithmic map for :math:`y` from :math:`0` on the manifold.
.. math::
\operatorname{Log}^c_0(y) = \tanh^{-1}(\sqrt{c}\|y\|_2) \frac{y}{\|y\|_2}
The result is such that
.. math::
y = \operatorname{Exp}^c_0(\operatorname{Log}^c_0(y))
Parameters
----------
y : tensor
target point on poincare ball
c : float|tensor
ball negative curvature
Returns
-------
tensor
tangent vector that transports :math:`0` to :math:`y`
"""
c = torch.as_tensor(c).type_as(y)
return _logmap0(y, c)
def _logmap0(y, c):
sqrt_c = c ** 0.5
y_norm = torch.clamp_min(y.norm(dim=-1, p=2, keepdim=True), 1e-5)
return y / y_norm / sqrt_c * artanh(sqrt_c * y_norm)
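# Illustrative round trip (sketch, not part of the original module): logmap0
# inverts expmap0 up to numerical tolerance for points inside the ball:
#
# u = torch.randn(4, 2) * 0.1
# y = expmap0(u)                     # tangent vector at 0 -> point on ball
# assert torch.allclose(logmap0(y), u, atol=1e-4)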
def mobius_matvec(m, x, *, c=1.0):
r"""
Generalization for matrix-vector multiplication to hyperbolic space defined as
.. math::
M \otimes_c x = (1/\sqrt{c}) \tanh\left(
\frac{\|Mx\|_2}{\|x\|_2}\tanh^{-1}(\sqrt{c}\|x\|_2)
\right)\frac{Mx}{\|Mx\|_2}
Parameters
----------
m : tensor
matrix for multiplication
x : tensor
point on poincare ball
c : float|tensor
        ball negative curvature
Returns
-------
tensor
Mobius matvec result
"""
c = torch.as_tensor(c).type_as(x)
return _mobius_matvec(m, x, c)
def _mobius_matvec(m, x, c):
x_norm = torch.clamp_min(x.norm(dim=-1, keepdim=True, p=2), 1e-5)
sqrt_c = c ** 0.5
mx = x @ m.transpose(-1, -2)
mx_norm = mx.norm(dim=-1, keepdim=True, p=2)
res_c = tanh(mx_norm / x_norm * artanh(sqrt_c * x_norm)) * mx / (mx_norm * sqrt_c)
cond = (mx == 0).prod(-1, keepdim=True, dtype=torch.uint8)
res_0 = torch.zeros(1, dtype=res_c.dtype, device=res_c.device)
res = torch.where(cond, res_0, res_c)
return _project(res, c)
def _tensor_dot(x, y):
res = torch.einsum("ij,kj->ik", (x, y))
return res
def _mobius_addition_batch(x, y, c):
xy = _tensor_dot(x, y) # B x C
x2 = x.pow(2).sum(-1, keepdim=True) # B x 1
y2 = y.pow(2).sum(-1, keepdim=True) # C x 1
num = 1 + 2 * c * xy + c * y2.permute(1, 0) # B x C
num = num.unsqueeze(2) * x.unsqueeze(1)
num = num + (1 - c * x2).unsqueeze(2) * y # B x C x D
denom_part1 = 1 + 2 * c * xy # B x C
denom_part2 = c ** 2 * x2 * y2.permute(1, 0)
denom = denom_part1 + denom_part2
res = num / (denom.unsqueeze(2) + 1e-5)
return res
def _hyperbolic_softmax(X, A, P, c):
lambda_pkc = 2 / (1 - c * P.pow(2).sum(dim=1))
k = lambda_pkc * torch.norm(A, dim=1) / torch.sqrt(c)
mob_add = _mobius_addition_batch(-P, X, c)
num = 2 * torch.sqrt(c) * torch.sum(mob_add * A.unsqueeze(1), dim=-1)
denom = torch.norm(A, dim=1, keepdim=True) * (1 - c * mob_add.pow(2).sum(dim=2))
logit = k.unsqueeze(1) * arsinh(num / denom)
return logit.permute(1, 0)
def p2k(x, c):
denom = 1 + c * x.pow(2).sum(-1, keepdim=True)
return 2 * x / denom
def k2p(x, c):
denom = 1 + torch.sqrt(1 - c * x.pow(2).sum(-1, keepdim=True))
return x / denom
def lorenz_factor(x, *, c=1.0, dim=-1, keepdim=False):
"""
Parameters
----------
x : tensor
point on Klein disk
c : float
negative curvature
dim : int
dimension to calculate Lorenz factor
keepdim : bool
retain the last dim? (default: false)
Returns
-------
tensor
Lorenz factor
"""
return 1 / torch.sqrt(1 - c * x.pow(2).sum(dim=dim, keepdim=keepdim))
def poincare_mean(x, dim=0, c=1.0):
x = p2k(x, c)
lamb = lorenz_factor(x, c=c, keepdim=True)
mean = torch.sum(lamb * x, dim=dim, keepdim=True) / torch.sum(
lamb, dim=dim, keepdim=True
)
mean = k2p(mean, c)
return mean.squeeze(dim)
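# Illustrative usage (sketch, not part of the original module): the
# hyperbolic mean of a batch of points, computed by mapping Poincare ->
# Klein, averaging with Lorenz factors, and mapping back:
#
# pts = project(torch.randn(16, 2) * 0.1)
# center = poincare_mean(pts, dim=0, c=1.0)  # shape: (2,)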
def _dist_matrix(x, y, c):
sqrt_c = c ** 0.5
return (
2
/ sqrt_c
* artanh(sqrt_c * torch.norm(_mobius_addition_batch(-x, y, c=c), dim=-1))
)
def dist_matrix(x, y, c=1.0):
c = torch.as_tensor(c).type_as(x)
return _dist_matrix(x, y, c)
def auto_select_c(d):
"""
    calculates the curvature c = 1/R**2 of the Poincare ball,
    such that the d-dimensional ball of radius R has constant volume equal to pi
"""
dim2 = d / 2.0
R = gamma(dim2 + 1) / (np.pi ** (dim2 - 1))
R = R ** (1 / float(d))
c = 1 / (R ** 2)
return c
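# Illustrative usage (sketch, not part of the original module): pairwise
# hyperbolic distances with a dimension-dependent curvature:
#
# d = 32
# c = auto_select_c(d)
# x = project(torch.randn(8, d) * 0.1, c=c)
# y = project(torch.randn(5, d) * 0.1, c=c)
# D = dist_matrix(x, y, c=c)                 # shape: (8, 5)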
|
ef2e6cdf64f9a662ac36b0536ddd97552d3cbb7c
|
4b2aa092c09094c872f855ecba9b25c12e08d010
|
/pyprof/examples/jit/jit_script_function.py
|
d3dbf9bb05ad0a07019ed38babc369842258eaa8
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/PyProf
|
46aaaa867f3773a20546fcdeafba7772d060f248
|
218dcc183bf7fdf97dbfc648878a3d09aea3b199
|
refs/heads/main
| 2023-09-04T00:00:41.542539
| 2021-05-14T14:40:29
| 2021-05-14T16:44:27
| 245,519,329
| 461
| 44
|
Apache-2.0
| 2022-10-24T20:06:13
| 2020-03-06T21:24:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
jit_script_function.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.cuda.profiler as profiler
import pyprof
#The following creates an object "foo" of type ScriptModule
#The new object has a function called "forward"
@torch.jit.script
def foo(x, y):
return torch.sigmoid(x) + y
#Initialize pyprof after the JIT step
pyprof.init()
#Assign a name to the object "foo"
foo.__name__ = "foo"
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(foo, 'forward')
x = torch.zeros(4, 4).cuda()
y = torch.ones(4, 4).cuda()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x, y)
profiler.stop()
print(z)
|
298a5dd1722ad6daf25215235d5334c55440f68a
|
3a50c0712e0a31b88d0a5e80a0c01dbefc6a6e75
|
/thrift/compiler/test/fixtures/nolegacy/gen-py3/test/fixtures/nolegacy/module/builders.pyi
|
d8ecb5febe23379b9e109955bd662ff1c65bae78
|
[
"Apache-2.0"
] |
permissive
|
facebook/fbthrift
|
3b7b94a533666c965ce69cfd6054041218b1ea6f
|
53cf6f138a7648efe5aef9a263aabed3d282df91
|
refs/heads/main
| 2023-08-24T12:51:32.367985
| 2023-08-24T08:28:35
| 2023-08-24T08:28:35
| 11,131,631
| 2,347
| 666
|
Apache-2.0
| 2023-09-01T01:44:39
| 2013-07-02T18:15:51
|
C++
|
UTF-8
|
Python
| false
| false
| 1,234
|
pyi
|
builders.pyi
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import typing as _typing
import folly.iobuf as _fbthrift_iobuf
import thrift.py3.builder
import test.fixtures.nolegacy.module.types as _test_fixtures_nolegacy_module_types
class TestError_Builder(thrift.py3.builder.StructBuilder):
test_enum: _typing.Optional[_test_fixtures_nolegacy_module_types.TestEnum]
code: _typing.Optional[int]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class TestMixin_Builder(thrift.py3.builder.StructBuilder):
field1: _typing.Optional[str]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class TestStruct_Builder(thrift.py3.builder.StructBuilder):
bar: _typing.Optional[str]
baropt: _typing.Optional[str]
test_error: _typing.Any
test_mixin: _typing.Any
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class TestUnion_Builder(thrift.py3.builder.StructBuilder):
enumVal: _typing.Optional[_test_fixtures_nolegacy_module_types.TestEnum]
structVal: _typing.Any
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
|
85574d3a9dab7cafa7ec26d04ca510ba66ddc283
|
be815aacbd7b06ac0ce3f412831639aa0297b988
|
/deploy/docker/cp-node-reporter/stats/reporter.py
|
ec8f83187af50447f155e1f4664a3dc77e8d0ee1
|
[
"Apache-2.0"
] |
permissive
|
epam/cloud-pipeline
|
8a861dae60d0f86089ff55e2f278e8593fc5e112
|
570dd898e96de931b96e584c86e72296b0e40607
|
refs/heads/develop
| 2023-08-30T08:03:18.672866
| 2023-08-29T17:07:13
| 2023-08-29T17:07:13
| 174,065,041
| 155
| 73
|
Apache-2.0
| 2023-09-14T13:36:36
| 2019-03-06T03:34:40
|
Java
|
UTF-8
|
Python
| false
| false
| 5,861
|
py
|
reporter.py
|
# Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import os
import resource
import socket
import sys
from abc import abstractmethod, ABC
from collections import namedtuple
from enum import Enum, auto
from logging.handlers import TimedRotatingFileHandler
import psutil
from flask import Flask
Value = namedtuple('Value', 'value')
Limit = namedtuple('Limit', 'soft,hard')
ProcStat = namedtuple('ProcStat', 'pid,name,type,current,limit')
class StatType(Enum):
NOFILE = auto()
class StatsResolver(ABC):
@abstractmethod
def get(self):
pass
class HostOpenFilesResolver(StatsResolver):
def __init__(self):
pass
def get(self):
logging.info('Collecting host open files stats...')
yield ProcStat(pid=0, name='all processes', type=StatType.NOFILE,
current=self._get_value(), limit=self._get_limit())
def _get_value(self):
with open('/proc/sys/fs/file-nr', 'r') as f:
# '1234\t0\t123456\n' -> 1234
value = int(f.read().strip().split('\t')[0])
return Value(value=value)
def _get_limit(self):
with open('/proc/sys/fs/file-max', 'r') as f:
# '123456\n' -> 123456
limit = int(f.read().strip())
return Limit(soft=limit, hard=limit)
class ProcOpenFilesResolver(StatsResolver):
def __init__(self, include=None):
self._include = include or []
def get(self):
logging.info('Collecting proc open files stats...')
for proc in self._find_procs(include=self._include):
yield ProcStat(pid=proc.pid, name=proc.name(), type=StatType.NOFILE,
current=self._get_value(proc), limit=self._get_limit(proc))
def _find_procs(self, include):
for proc in psutil.process_iter():
try:
proc_name = proc.name()
if proc_name in include:
yield proc
except Exception:
logging.exception('Skipping process #%s...', proc.pid)
def _get_value(self, proc):
return Value(value=proc.num_fds())
def _get_limit(self, proc):
soft_limit, hard_limit = resource.prlimit(proc.pid, resource.RLIMIT_NOFILE)
return Limit(soft=soft_limit, hard=hard_limit)
class StatsCollector:
def __init__(self, resolvers):
self._resolvers = resolvers
def collect(self):
logging.info('Initiating stats collection...')
for resolver in self._resolvers:
try:
yield from resolver.get()
except Exception:
logging.exception('Stats have not been collected by %s.', type(resolver).__name__)
logging.info('Stats collection has finished.')
class StatsViewer(ABC):
@abstractmethod
def view(self, stats):
pass
class JsonStatsViewer(StatsViewer):
def __init__(self, host):
self._host = host
self._datetime_format = '%Y-%m-%d %H:%M:%S.%f'
self._datetime_suffix_crop_length = 3
def view(self, stats):
host_view = {
'name': self._host,
'timestamp': datetime.datetime.now().strftime(self._datetime_format)[:-self._datetime_suffix_crop_length]
}
for stat in stats:
host_view['processes'] = host_view.get('processes', [])
proc_view = {'pid': stat.pid, 'name': stat.name}
proc_view['limits'] = proc_view.get('limits', {})
proc_view['limits'][stat.type.name] = {
'soft_limit': stat.limit.soft,
'hard_limit': stat.limit.hard
}
proc_view['stats'] = proc_view.get('stats', {})
proc_view['stats'][stat.type.name] = {
'value': stat.current.value
}
host_view['processes'].append(proc_view)
return host_view
logging_format = os.getenv('CP_LOGGING_FORMAT', default='%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s')
logging_level = os.getenv('CP_LOGGING_LEVEL', default='DEBUG')
logging_file = os.getenv('CP_LOGGING_FILE', default='stats.log')
logging_history = int(os.getenv('CP_LOGGING_HISTORY', default='10'))
host = os.getenv('NODE_NAME', socket.gethostname())
procs_include = os.getenv('CP_NODE_REPORTER_STATS_PROCS_INCLUDE', 'dockerd,docker-containerd,containerd').split(',')
logging_formatter = logging.Formatter(logging_format)
logging.getLogger().setLevel(logging_level)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging_formatter)
logging.getLogger().addHandler(console_handler)
file_handler = TimedRotatingFileHandler(logging_file, when='D', interval=1,
backupCount=logging_history)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging_formatter)
logging.getLogger().addHandler(file_handler)
collector = StatsCollector(resolvers=[
HostOpenFilesResolver(),
ProcOpenFilesResolver(include=procs_include)])
viewer = JsonStatsViewer(host=host)
logging.info('Initializing...')
app = Flask(__name__)
@app.route('/')
def get_stats():
stats = collector.collect()
view = viewer.view(stats)
return json.dumps(view, indent=4)
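# --- Illustrative local run (not part of the original module) ---
# In the actual deployment this Flask app is presumably served by the
# container entrypoint; host and port below are made-up examples:
#
# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=8000)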
|
be01aa4b5320c24c36554aca7188f564e6385216
|
aae3c6fccb2296e4da5bb10310f5dd6baba8b7de
|
/activitysim/cli/benchmark.py
|
af46766aaf565908d77b042407f1a625fba7f20d
|
[
"BSD-3-Clause"
] |
permissive
|
ActivitySim/activitysim
|
3d938e616452be76db1bb0c8a1212e12b9216823
|
a8e755f96d0e32633a6d3657c4878e3b6a37e59a
|
refs/heads/main
| 2023-08-08T16:02:06.275693
| 2023-05-09T13:08:23
| 2023-05-09T13:08:23
| 20,981,950
| 118
| 89
|
BSD-3-Clause
| 2023-07-25T14:07:16
| 2014-06-18T23:57:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,807
|
py
|
benchmark.py
|
import json
import os
import shutil
import subprocess
import sys
ASV_CONFIG = {
# The version of the config file format. Do not change, unless
# you know what you are doing.
"version": 1,
# The name of the project being benchmarked
"project": "activitysim",
# The project's homepage
"project_url": "https://activitysim.github.io/",
# The URL or local path of the source code repository for the
# project being benchmarked
"repo": ".",
# The tool to use to create environments.
"environment_type": "conda",
# the base URL to show a commit for the project.
"show_commit_url": "http://github.com/ActivitySim/activitysim/commit/",
# The Pythons you'd like to test against. If not provided, defaults
# to the current version of Python used to run `asv`.
# "pythons": ["2.7", "3.6"],
# The list of conda channel names to be searched for benchmark
# dependency packages in the specified order
"conda_channels": ["conda-forge"],
# The matrix of dependencies to test. Each key is the name of a
# package (in PyPI) and the values are version numbers. An empty
# list or empty string indicates to just test against the default
# (latest) version. null indicates that the package is to not be
# installed. If the package to be tested is only available from
# PyPi, and the 'environment_type' is conda, then you can preface
# the package name by 'pip+', and the package will be installed via
# pip (with all the conda available packages installed first,
# followed by the pip installed packages).
"matrix": {
"pyarrow": [],
"numpy": [],
"scipy": ["1.7"],
"openmatrix": [],
"pandas": ["1.3"],
"pyyaml": [],
"pytables": [],
"toolz": [],
"orca": [],
"psutil": [],
"requests": [],
"numba": ["0.54"],
"cytoolz": [],
"zarr": [],
"xarray": [],
"filelock": [],
"dask": [],
"networkx": [],
"sharrow": [],
},
# The directory (relative to the current directory) to cache the Python
# environments in. If not provided, defaults to "env"
# "env_dir": "../activitysim-asv/env",
# The directory (relative to the current directory) that raw benchmark
# results are stored in. If not provided, defaults to "results".
# "results_dir": "../activitysim-asv/results",
# The directory (relative to the current directory) that the html tree
# should be written to. If not provided, defaults to "html".
# "html_dir": "../activitysim-asv/html",
# List of branches to benchmark. If not provided, defaults to "master"
# (for git) or "default" (for mercurial).
"branches": ["develop"],
}
def make_asv_argparser(parser):
"""
The entry point for asv.
Most of this work is handed off to the airspeed velocity library.
"""
try:
from asv.commands import Command, command_order, common_args, util
except ImportError:
return
def help(args):
parser.print_help()
sys.exit(0)
common_args.add_global_arguments(parser, suppress_defaults=False)
subparsers = parser.add_subparsers(
title="benchmarking with airspeed velocity", description="valid subcommands"
)
help_parser = subparsers.add_parser("help", help="Display usage information")
help_parser.set_defaults(afunc=help)
commands = dict((x.__name__, x) for x in util.iter_subclasses(Command))
hide_commands = [
"quickstart",
]
for command in command_order:
if str(command) in hide_commands:
continue
subparser = commands[str(command)].setup_arguments(subparsers)
common_args.add_global_arguments(subparser)
subparser.add_argument(
"--workspace",
"-w",
help="benchmarking workspace directory",
default=".",
)
subparser.add_argument(
"--branch",
type=str,
action="append",
metavar="NAME",
help="git branch to include in benchmarking",
)
del commands[command]
for name, command in sorted(commands.items()):
if str(command) in hide_commands:
continue
subparser = command.setup_arguments(subparsers)
subparser.add_argument(
"--workspace",
"-w",
help="benchmarking workspace directory",
default=".",
)
subparser.add_argument(
"--branch",
type=str,
action="append",
metavar="NAME",
help="git branch to include in benchmarking",
)
common_args.add_global_arguments(subparser)
from ..benchmarking.latest import Batch, Latest
subparser = Latest.setup_arguments(subparsers)
subparser.add_argument(
"--workspace",
"-w",
help="benchmarking workspace directory",
default=".",
)
subparser.add_argument(
"--branch",
type=str,
action="append",
metavar="NAME",
help="git branch to include in benchmarking",
)
common_args.add_global_arguments(subparser)
subparser = Batch.setup_arguments(subparsers)
subparser.add_argument(
"--workspace",
"-w",
help="benchmarking workspace directory",
default=".",
)
from ..benchmarking.profile_inspector import ProfileInspector
subparser = ProfileInspector.setup_arguments(subparsers)
subparser.add_argument(
"--workspace",
"-w",
help="benchmarking workspace directory",
default=".",
)
parser.set_defaults(afunc=benchmark)
return parser, subparsers
def benchmark(args):
try:
import asv
except ModuleNotFoundError:
print("airspeed velocity is not installed")
print("try `conda install asv -c conda-forge` if you want to run benchmarks")
sys.exit(1)
from asv import util
from asv.console import log
log.enable(args.verbose)
log.info("<== benchmarking activitysim ==>")
# workspace
args.workspace = os.path.abspath(args.workspace)
if os.path.abspath(os.path.expanduser("~")) == args.workspace:
log.error(
"don't run benchmarks in the user's home directory \n"
"try changing directories before calling `activitysim benchmark` "
"or use the --workspace option \n"
)
sys.exit(1)
if not os.path.isdir(args.workspace):
raise NotADirectoryError(args.workspace)
log.info(f" workspace: {args.workspace}")
os.chdir(args.workspace)
os.environ["ASIM_ASV_WORKSPACE"] = str(args.workspace)
from ..benchmarking import workspace
workspace.set_dir(args.workspace)
from .. import __path__ as pkg_path
log.info(f" activitysim installation: {pkg_path[0]}")
repo_dir = os.path.normpath(os.path.join(pkg_path[0], ".."))
git_dir = os.path.normpath(os.path.join(repo_dir, ".git"))
local_git = os.path.exists(git_dir)
log.info(f" local git repo available: {local_git}")
branches = args.branch
asv_config = ASV_CONFIG.copy()
if local_git:
repo_dir_rel = os.path.relpath(repo_dir, args.workspace)
log.info(f" local git repo: {repo_dir_rel}")
asv_config["repo"] = repo_dir_rel
if not branches:
# add current branch to the branches to benchmark
current_branch = (
subprocess.check_output(
["git", "branch", "--show-current"],
env={"GIT_DIR": git_dir},
stdin=None,
stderr=None,
shell=False,
universal_newlines=False,
)
.decode()
.strip()
)
if current_branch:
asv_config["branches"].append(current_branch)
else:
log.info(f" local git repo available: {local_git}")
asv_config["repo"] = "https://github.com/ActivitySim/activitysim.git"
asv_config["branches"].extend(branches)
# copy the benchmarks to the workspace, deleting previous files in workspace
import activitysim.benchmarking.benchmarks
benchmarks_dir = os.path.dirname(activitysim.benchmarking.benchmarks.__file__)
shutil.rmtree(
os.path.join(args.workspace, "benchmarks"),
ignore_errors=True,
)
shutil.copytree(
benchmarks_dir,
os.path.join(args.workspace, "benchmarks"),
dirs_exist_ok=True,
)
# write the asv config to the workspace
conf_file = os.path.normpath(os.path.join(args.workspace, "asv.conf.json"))
with open(conf_file, "wt") as jf:
json.dump(asv_config, jf)
if args.config and args.config != "asv.conf.json":
raise ValueError(
"activitysim manages the asv config json file itself, do not use --config"
)
args.config = os.path.abspath(conf_file)
# write the pre-commit search and replace hook to the workspace
search_replace_file = os.path.normpath(
os.path.join(args.workspace, ".pre-commit-search-and-replace.yaml")
)
with open(search_replace_file, "wt") as sf:
benchpath = os.path.join(args.workspace, "benchmarks")
if not benchpath.endswith(os.path.sep):
benchpath += os.path.sep
benchpath = benchpath.replace(os.path.sep, r"[/\\]")
sf.write(f"""- search: /{benchpath}/\n replacement: ./\n""")
try:
result = args.func(args)
except util.UserError as e:
log.error(str(e))
sys.exit(1)
finally:
log.flush()
if result is None:
result = 0
sys.exit(result)
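# --- Illustrative CLI usage (not part of the original module) ---
# Subcommand names come from asv plus the custom Latest/Batch/ProfileInspector
# commands registered above; paths and branch names are assumptions:
#
#   activitysim benchmark run --workspace /path/to/workspace --branch develop
#   activitysim benchmark latest -w /path/to/workspace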
|
4fa0a58125dae241cc9b8d3743a4738865129dc4
|
e0cc314aa73c0a965a2022f19900df3ccc8c9f43
|
/windows/winobject/device_manager.py
|
72d905f7aa2b48b13286d54fde26c10add72c05b
|
[
"BSD-3-Clause"
] |
permissive
|
hakril/PythonForWindows
|
21823e743ee4ae7ff99e376378357833f2e19d26
|
82d0c5cc5c9b4d569dca2c755f26b947e3ff74f5
|
refs/heads/master
| 2023-06-08T03:25:50.354768
| 2023-05-26T07:31:06
| 2023-05-26T07:31:06
| 49,235,784
| 568
| 111
|
BSD-3-Clause
| 2023-01-30T12:04:03
| 2016-01-07T22:54:33
|
Python
|
UTF-8
|
Python
| false
| false
| 19,645
|
py
|
device_manager.py
|
import ctypes
import itertools
import windows
from windows import winproxy
import windows.generated_def as gdef
from windows.security import SecurityDescriptor
from windows.utils import fixedproperty
class DeviceManager(object):
"""Represent the device manager"""
@property
def classes(self):
"""The list of installed device classes.
:return: [:class:`DeviceClass`] -- A list of :class:`DeviceClass`
"""
return list(self._classes_generator())
def _classes_generator(self):
for index in itertools.count():
try:
yield self._enumerate_classes(index, 0)
except WindowsError as e:
if e.winerror == gdef.CR_NO_SUCH_VALUE:
break
# Some index values might represent list entries containing invalid class data,
# in which case the function returns CR_INVALID_DATA.
# This return value can be ignored.
if e.winerror == gdef.CR_INVALID_DATA:
continue
raise
def _enumerate_classes(self, index, flags=0):
res = DeviceClass()
x = winproxy.CM_Enumerate_Classes(index, res, flags)
return res
class DeviceClass(gdef.GUID):
"""A Device class, which is mainly a :class:`GUID` with additional attributes"""
def __init__(self):
        # Bypass GUID __init__, which is not relevant here
pass
@fixedproperty
def name(self):
"""The name of the device class"""
return self._get_device_class_name()
@property
def devices(self):
"""The set of devices of the current class.
:type: :class:`DeviceInformationSet`
"""
return self.enumerate_devices()
def enumerate_devices(self, flags=0):
handle = winproxy.SetupDiGetClassDevsA(self, Flags=flags)
return DeviceInformationSet(handle)
def _get_device_class_name(self):
name = ctypes.create_string_buffer(gdef.MAX_CLASS_NAME_LEN)
winproxy.SetupDiClassNameFromGuidA(self, name)
return name.value
def __repr__(self):
guid_cls = self.to_string()
return """<{0} name="{1}" guid={2}>""".format(type(self).__name__, self.name, guid_cls)
__str__ = __repr__ # Overwrite default GUID str
class DeviceInformationSet(gdef.HDEVINFO):
"""A device instances, can be itered to retrieve the underliyings :class:`DeviceInstance`"""
def all_device_infos(self):
for index in itertools.count():
try:
yield self.enum_device_info(index)
except WindowsError as e:
if e.winerror == gdef.ERROR_NO_MORE_ITEMS:
return
raise
__iter__ = all_device_infos
def enum_device_info(self, index):
res = DeviceInstance(self)
res.cbSize = ctypes.sizeof(res)
winproxy.SetupDiEnumDeviceInfo(self, index, res)
return res
def enum_device_interface(self, index):
"""Not Implemented Yet"""
raise NotImplementedError("enum_device_interface")
def all(self):
return list(self)
class DeviceInstance(gdef.SP_DEVINFO_DATA):
"""An instance of a Device.
The properties are from the page https://docs.microsoft.com/en-us/windows/win32/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya#spdrp_address
"""
def __init__(self, information_set=None):
self.information_set = information_set
        # make a .device_class property that returns the DeviceClass based on ClassGuid ?
def get_property(self, property):
datatype = gdef.DWORD()
buffer_size = 0x1000
buffer = windows.utils.BUFFER(gdef.BYTE, nbelt=buffer_size)()
required_size = gdef.DWORD()
# Registry parsing code expect W stuff, so use W function
try:
winproxy.SetupDiGetDeviceRegistryPropertyW(self.information_set, self, property, datatype, buffer.cast(gdef.LPBYTE), buffer_size, required_size)
except WindowsError as e:
if e.winerror == gdef.ERROR_INVALID_DATA:
return None
raise
# PropertyRegDataType
# A pointer to a variable that receives the data type of the property
# that is being retrieved.
# This is one of the standard registry data types
# Look like its registry based, so use the registry decoders :)
return windows.winobject.registry.decode_registry_buffer(datatype.value, buffer, required_size.value)
def _generate_property_getter(prop):
def getter(self):
return self.get_property(prop)
return property(getter)
name = _generate_property_getter(gdef.SPDRP_FRIENDLYNAME)
"""The name of the device"""
description = _generate_property_getter(gdef.SPDRP_DEVICEDESC)
"""The description of the device"""
hardware_id = _generate_property_getter(gdef.SPDRP_HARDWAREID)
"""The list of hardware IDs for the device.
(https://docs.microsoft.com/en-us/windows/win32/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya#spdrp_hardwareid)
"""
enumerator_name = _generate_property_getter(gdef.SPDRP_ENUMERATOR_NAME)
"""The enumerator name of the devices
(https://docs.microsoft.com/en-us/windows/win32/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya#spdrp_enumerator_name)
"""
driver = _generate_property_getter(gdef.SPDRP_DRIVER)
"""The driver of the device
https://docs.microsoft.com/en-us/windows/win32/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya#spdrp_driver
"""
# Map on Device type ?
# https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/specifying-device-types
type = _generate_property_getter(gdef.SPDRP_DEVTYPE)
"""The type of device
(https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/specifying-device-types)
"""
upper_filters = _generate_property_getter(gdef.SPDRP_UPPERFILTERS)
"""A list of string that contains the names of a device's upper filter drivers."""
lower_filters = _generate_property_getter(gdef.SPDRP_LOWERFILTERS)
"""A list of string that contains the names of a device's lower filter drivers."""
raw_security_descriptor = _generate_property_getter(gdef.SPDRP_SECURITY)
"""The raw (binary) security descriptor of the device"""
# I would prefer to use the security_descriptor sddl
# ssdl = _generate_property_getter(gdef.SPDRP_SECURITY_SDS)
service_name = _generate_property_getter(gdef.SPDRP_SERVICE)
"""The name of the service for the device
(https://docs.microsoft.com/en-us/windows/win32/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya#spdrp_service)
"""
manufacturer = _generate_property_getter(gdef.SPDRP_MFG)
"""The name of the device manufacturer."""
location_information = _generate_property_getter(gdef.SPDRP_LOCATION_INFORMATION)
"""The hardware location of a device."""
location_paths = _generate_property_getter(gdef.SPDRP_LOCATION_PATHS)
"""A list of strings that represents the location of the device in the device tree."""
# Looks like it can raise ERROR_NO_SUCH_DEVINST
# install_date = _generate_property_getter(gdef.SPDRP_INSTALL_STATE)
capabilites = _generate_property_getter(gdef.SPDRP_CAPABILITIES)
"""The device capabilites
(https://docs.microsoft.com/en-us/windows/win32/api/setupapi/nf-setupapi-setupdigetdeviceregistrypropertya#spdrp_capabilities)
"""
bus_type = _generate_property_getter(gdef.SPDRP_BUSTYPEGUID)
"""The function retrieves the GUID for the device's bus type."""
bus_number = _generate_property_getter(gdef.SPDRP_BUSNUMBER)
"""The device's bus number."""
address = _generate_property_getter(gdef.SPDRP_ADDRESS)
"""The device's address."""
ui_number = _generate_property_getter(gdef.SPDRP_UI_NUMBER)
"""Retrieves a DWORD value set to the value of the UINumber member of the device's"""
ui_number_desc_format = _generate_property_getter(gdef.SPDRP_UI_NUMBER_DESC_FORMAT)
# Getter with special error handling
@property
def device_object_name(self):
"""The function retrieves a string that contains the name that is associated with the device's PDO."""
try:
return self.get_property(gdef.SPDRP_PHYSICAL_DEVICE_OBJECT_NAME)
except WindowsError as e:
if e.winerror not in (gdef.ERROR_INVALID_DATA, gdef.ERROR_NO_SUCH_DEVINST):
raise
# https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/hardware-resources
# Explanation of types:
# - https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/hardware-resources#logical-configuration-types-for-resource-requirements-lists
def get_first_logical_configuration(self, type):
res = LogicalConfiguration()
try:
winproxy.CM_Get_First_Log_Conf(res, self.DevInst, type)
except WindowsError as e:
if e.winerror == gdef.CR_CALL_NOT_IMPLEMENTED:
e.strerror += " (Cannot be called from Wow64 process since Win8)"
raise
return res
def get_next_logical_configuration(self, logconf):
res = gdef.HANDLE(0)
winproxy.CM_Get_Next_Log_Conf(res, logconf)
return res
def _logical_configuration_generator(self, type):
x = self.get_first_logical_configuration(type)
while x:
yield x
try:
x = self.get_next_logical_configuration(x)
except WindowsError as e:
if e.winerror == gdef.CR_NO_MORE_LOG_CONF:
return
raise
def get_logical_configuration(self, type):
return list(self._logical_configuration_generator(type))
# Allocated Configuration
# From https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/hardware-resources#logical-configuration-types-for-resource-lists
# A resource list identifying resources currently in use by a device instance.
# !!! Only one allocated configuration can exist for each device instance.
@property
def allocated_configuration(self):
"""The allocated configuration of the device.
(https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/hardware-resources#logical-configuration-types-for-resource-lists)
:type: :class:`LogicalConfiguration`
"""
allocconfs = self.get_logical_configuration(gdef.ALLOC_LOG_CONF)
if not allocconfs:
return allocconfs
assert len(allocconfs) == 1 # Only one allocated configuration can exist for each device instance.
return allocconfs[0]
# Boot Configuration
# From https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/hardware-resources#logical-configuration-types-for-resource-lists
# A resource list identifying the resources assigned to a device instance when the system is booted
# Only one boot configuration can exist for each device instance.
@property
def boot_configuration(self):
"""The boot configuration of the device.
(https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/hardware-resources#logical-configuration-types-for-resource-lists)
:type: :class:`LogicalConfiguration`
"""
bootconfs = self.get_logical_configuration(gdef.BOOT_LOG_CONF)
if not bootconfs:
return bootconfs
assert len(bootconfs) == 1 # Only one boot configuration can exist for each device instance.
return bootconfs[0]
# Make properties for Each type of logical configuration ?
# 'advanced' attributes extrapolated from properties
@property
def security_descriptor(self):
"""The security descriptor of the device.
:type: :class:`~windows.security.SecurityDescriptor`
"""
return SecurityDescriptor.from_binary(self.raw_security_descriptor)
def __repr__(self):
return """<{0} "{1}" (id={2})>""".format(type(self).__name__, self.description, self.DevInst)
class LogicalConfiguration(gdef.HANDLE):
"""Logical Configuration of a Device instance"""
def get_next_resource_descriptor(self, resource, resdes=None):
if resdes is None:
# Using logical-conf as resdes will retrieve the first one
# https://docs.microsoft.com/en-us/windows/win32/api/cfgmgr32/nf-cfgmgr32-cm_get_next_res_des#remarks
resdes = self
resid = None
if resource == gdef.ResType_All:
resid = gdef.RESOURCEID()
res = gdef.HANDLE()
winproxy.CM_Get_Next_Res_Des(res, resdes, resource, resid, 0)
resdes_type = resid.value if resid is not None else resource
return ResourceDescriptor.from_handle_and_type(res.value, resdes_type)
def get_resources_for_type(self, type):
try:
current = self.get_next_resource_descriptor(type)
yield current
while True:
current = self.get_next_resource_descriptor(type, current)
yield current
except WindowsError as e:
if e.winerror == gdef.CR_NO_MORE_RES_DES:
return
raise
@property
def resources(self):
"""The list of resources in the current logical configuration
:type: [:class:`ResourceDescriptor`] -- A list of [:class:`ResourceDescriptor`]
"""
return list(self.get_resources_for_type(gdef.ResType_All))
def __repr__(self):
return "<{0}>".format(type(self).__name__)
ResType_Mapper = gdef.FlagMapper(
gdef.ResType_None,
gdef.ResType_Mem,
gdef.ResType_IO,
gdef.ResType_DMA,
gdef.ResType_IRQ,
gdef.ResType_BusNumber,
gdef.ResType_MemLarge,
gdef.ResType_ClassSpecific,
gdef.ResType_DevicePrivate,
gdef.ResType_MfCardConfig,
gdef.ResType_PcCardConfig,
)
class ResourceDescriptor(gdef.HANDLE):
"""Describe a resource allocated or reserved by a device instance.
This class is a base class, all resources returned by :class:`LogicalConfiguration` should be one of the following:
* :class:`ResourceNoType`
* :class:`MemoryResource`
* :class:`IoResource`
* :class:`DmaResource`
* :class:`IrqResource`
* :class:`BusNumberResource`
* :class:`MemLargeResource`
* :class:`ClassSpecificResource`
* :class:`DevicePrivateResource`
* :class:`MfCardConfigResource`
* :class:`PcCardConfigResource`
"""
SUBCLASSES = {}
def __init__(self, handle, type):
super(ResourceDescriptor, self).__init__(handle)
self.type = ResType_Mapper[type]
@classmethod
def from_handle_and_type(cls, handle, type):
ecls = cls.SUBCLASSES[type]
return ecls(handle, type)
@property
def rawdata(self):
"""The raw data describing the resource"""
data_size = gdef.ULONG()
winproxy.CM_Get_Res_Des_Data_Size(data_size, self)
if not self:
return None
data_size = data_size.value
buffer = ctypes.create_string_buffer(data_size)
winproxy.CM_Get_Res_Des_Data(self, buffer, data_size)
return bytearray(buffer[:data_size])
def __repr__(self):
return "<{0} type={1!r}>".format(type(self).__name__, self.type)
class ResourceDescriptorWithHeader(ResourceDescriptor):
# Assert the header is the first field
@property
def header_type(self):
# Type of first field
return self.DATA_TYPE._fields_[0][1]
@property
def header(self):
return self.header_type.from_buffer(self.rawdata)
@property
def data(self):
return None
class ResourceDescriptorWithHeaderAndRanges(ResourceDescriptorWithHeader):
def count_field_name(self):
# Assert (manyally checked) that the first field of the
# header is a field containing the size of the data array
# Return name of the first field of the header
return self.header_type._fields_[0][0]
@property
def data(self):
count_field_name = self.count_field_name()
count = getattr(self.header, count_field_name)
# No entry:
if not count:
return []
raise NotImplementedError("Resource descriptor with non-zero entry in range array")
class ResourceNoType(ResourceDescriptor):
@property
def data(self):
return self.rawdata
class MemoryResource(ResourceDescriptorWithHeaderAndRanges):
"""A resource of type MEM_RESOURCE"""
DATA_TYPE = gdef.MEM_RESOURCE
def __str__(self):
return "<{0} : [{1:#016x}-{2:#016x}]>".format(type(self).__name__, self.header.MD_Alloc_Base, self.header.MD_Alloc_End)
class IoResource(ResourceDescriptorWithHeaderAndRanges):
"""A resource of type IO_RESOURCE"""
DATA_TYPE = gdef.IO_RESOURCE
def __str__(self):
return "<{0} : [{1:#016x}-{2:#016x}]>".format(type(self).__name__, self.header.IOD_Alloc_Base, self.header.IOD_Alloc_End)
class DmaResource(ResourceDescriptorWithHeaderAndRanges):
"""A resource of type DMA_RESOURCE"""
DATA_TYPE = gdef.DMA_RESOURCE
def __str__(self):
return "<{0} : [{1:#016x}]>".format(type(self).__name__, self.header.DD_Alloc_Chan)
class IrqResource(ResourceDescriptorWithHeaderAndRanges):
"""A resource of type IRQ_RESOURCE"""
# 32/64 based on current process bitness
# Cross bitness cannot be implemented as >=Win8 block it
DATA_TYPE = gdef.IRQ_RESOURCE
def __str__(self):
return "<{0} : [{1:#016x}]>".format(type(self).__name__, self.header.IRQD_Alloc_Num)
class BusNumberResource(ResourceDescriptorWithHeaderAndRanges):
"""A resource of type BUSNUMBER_RESOURCE"""
DATA_TYPE = gdef.BUSNUMBER_RESOURCE
def __str__(self):
return "<{0} : [{1:#016x}-{2:#016x}]>".format(type(self).__name__, self.header.BUSD_Alloc_Base, self.header.BUSD_Alloc_End)
class MemLargeResource(ResourceDescriptorWithHeaderAndRanges):
"""A resource of type MEM_LARGE_RESOURCE"""
DATA_TYPE = gdef.MEM_LARGE_RESOURCE
def __str__(self):
return "<{0} : [{1:#016x}-{2:#016x}]>".format(type(self).__name__, self.header.MLD_Alloc_Base, self.header.MLD_Alloc_End)
class ClassSpecificResource(ResourceDescriptorWithHeader):
"""A resource of type CS_RESOURCE"""
DATA_TYPE = gdef.CS_RESOURCE
# Any idea for __str__ ?
class DevicePrivateResource(ResourceDescriptor):
"""A device private resource
(https://docs.microsoft.com/en-us/windows-hardware/drivers/install/devprivate-resource)
"""
@property
def header(self):
return None
# Any idea for __str__ ?
class MfCardConfigResource(ResourceDescriptorWithHeader):
"""A resource of type MFCARD_RESOURCE"""
DATA_TYPE = gdef.MFCARD_RESOURCE
# Any idea for __str__ ?
class PcCardConfigResource(ResourceDescriptorWithHeader):
"""A resource of type PCCARD_RESOURCE"""
DATA_TYPE = gdef.PCCARD_RESOURCE
# Any idea for __str__ ?
# Too lazy to write a metaclass for this..
ResourceDescriptor.SUBCLASSES.update({
gdef.ResType_None: ResourceNoType,
gdef.ResType_Mem: MemoryResource,
gdef.ResType_IO: IoResource,
gdef.ResType_DMA: DmaResource,
gdef.ResType_IRQ: IrqResource,
gdef.ResType_BusNumber: BusNumberResource,
gdef.ResType_MemLarge: MemLargeResource,
gdef.ResType_ClassSpecific: ClassSpecificResource,
gdef.ResType_DevicePrivate: DevicePrivateResource,
gdef.ResType_MfCardConfig: MfCardConfigResource,
gdef.ResType_PcCardConfig: PcCardConfigResource,
})
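# --- Illustrative usage sketch (not part of the original module) ---
# Must run on Windows; the classes, devices and resources printed depend
# entirely on the local machine:
#
# devmgr = DeviceManager()
# for devclass in devmgr.classes:
#     for device in devclass.devices:
#         conf = device.allocated_configuration
#         if conf:
#             print(devclass.name, device.description,
#                   [str(res) for res in conf.resources])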
|
804b847b7e03cf8ceb11e3a8b89eb85f763655b0
|
03a7f7a7eb8c16b537b65ec21f465bb0335bc3b8
|
/pythran/tests/cases/zero.py
|
e3dba86ead2a2fa7e3490bb979c117e981c76230
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
serge-sans-paille/pythran
|
a0e22af1ac5e1f34f3f29dce36502f4a897b5186
|
d8ab07b4b3b690f50603cb4d08ba303d3af18b90
|
refs/heads/master
| 2023-09-01T16:04:03.289285
| 2023-08-30T09:13:58
| 2023-08-31T08:03:22
| 4,479,494
| 1,882
| 200
|
BSD-3-Clause
| 2023-09-06T20:08:10
| 2012-05-29T08:02:14
|
C++
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
zero.py
|
#pythran export zero(int, int)
#runas zero(10,20)
#bench zero(6000,6000)
def zero(n,m): return [[0 for row in range(n)] for col in range(m)]
|
6d6f46316a480ce8653fdf15c2fb23fdf2889c0d
|
4e0a88e163d97d873d8d7e58465502c87bc86054
|
/tests/test_quality.py
|
6b631822684d895758b381fa7c1c0cfba0cc2d72
|
[
"Apache-2.0"
] |
permissive
|
HLasse/TextDescriptives
|
ac70f320de86d227efc0d896f1990de9dde4710a
|
d73d25fe181975a66584e9d4255e42fa556ffdc9
|
refs/heads/main
| 2023-08-25T00:13:33.965333
| 2023-08-22T07:28:40
| 2023-08-22T07:28:40
| 236,710,916
| 231
| 19
|
Apache-2.0
| 2023-09-12T09:03:00
| 2020-01-28T10:37:59
|
Python
|
UTF-8
|
Python
| false
| false
| 9,305
|
py
|
test_quality.py
|
""" Tests for the quality module."""
from typing import List, Tuple
import pytest
import spacy
import textdescriptives as td
from textdescriptives.components.quality import (
alpha_ratio,
duplicate_ngram_fraction,
mean_word_length,
n_stop_words,
oov_ratio,
proportion_bullet_points,
proportion_ellipsis,
symbol_to_word_ratio,
top_ngram_chr_fraction,
)
@pytest.fixture
def nlp():
"""Load a blank English model."""
return spacy.blank("en")
@pytest.mark.parametrize(
"text, stop_words",
[
("", 0),
("This is a test.", 3),
("This is a test. This is a test.", 6),
],
)
def test_n_stop_words(text: str, stop_words: int, nlp: spacy.Language):
"""Test the n_stop_words function."""
doc = nlp(text)
assert n_stop_words(doc) == stop_words
# test mean word length
@pytest.mark.parametrize(
"text, mean_length",
[
("", 0),
("This is a test.", 2.4),
("This is a test. This is a test.", 2.4),
],
)
def test_mean_word_length(text: str, mean_length: float, nlp: spacy.Language):
"""Test the mean_word_length function."""
doc = nlp(text)
assert mean_word_length(doc) == mean_length
# test alpha ratio
@pytest.mark.parametrize(
"text, alpha",
[
("", 0),
("This is a test.", 0.8),
("This,, is a test. 12355 is €%&/( a <<s.", 0.38),
("This is a test. This is a test. 123", 0.72),
],
)
def test_alpha_ratio(text: str, alpha: float, nlp: spacy.Language):
"""Test the alpha_ratio function."""
doc = nlp(text)
assert abs(alpha_ratio(doc) - alpha) < 0.01
# test proportion bullet points
@pytest.mark.parametrize(
"text, bullet_points",
[
("", 0),
("- This is a test.", 1),
("- This is a test. \nThis is a test.", 0.5),
("* This is a test.", 1),
],
)
def test_proportion_bullet_points(text: str, bullet_points: float, nlp: spacy.Language):
"""Test the proportion_bullet_points function."""
doc = nlp(text)
assert abs(proportion_bullet_points(doc) - bullet_points) < 0.01
# test proportion ellipsis
@pytest.mark.parametrize(
"text, ellipsis",
[
("", 0),
("This is a test...", 1),
("This is a test. \nThis is a test...", 0.5),
],
)
def test_proportion_ellipsis(text: str, ellipsis: float, nlp: spacy.Language):
"""Test the proportion_ellipsis function."""
doc = nlp(text)
assert proportion_ellipsis(doc) == ellipsis
# test symbol to word ratio
@pytest.mark.parametrize(
"text, symbol_to_word",
[
("", 0),
("This is a test.", 0),
("# test symbol to word ratio", 0.2),
("#### test symbol to word ratio", 0.8),
],
)
def test_symbol_to_word_ratio(text: str, symbol_to_word: float, nlp: spacy.Language):
"""Test the symbol_to_word_ratio function."""
doc = nlp(text)
assert abs(symbol_to_word_ratio(doc, symbol="#") - symbol_to_word) < 0.01
# test duplicate ngram fraction
@pytest.mark.parametrize(
"text, duplicate_ngram, ngram_range",
[
("", (0, 0), (2, 3)),
("This is a test.", (0, 0), (2, 3)),
("This is a test. This is a test.", (1,), (2, 2)),
("This is a test. This is a test.", (1, 1), (2, 3)),
("This is a test. This is a test. This is not a test.", (0.90, 0.90), (2, 3)),
(
"This is a test. This is maybe a test. This is not a test.",
(0.789, 0.649),
(2, 3),
),
("This is a test. This is a test. This is a test.", (1,), (3, 3)),
("This is a test. This is a test. This is a test.", (1, 1, 1, 1), (2, 5)),
("This is a test, where there is no duplicate ngram.", (0, 0), (2, 3)),
("This is a test. This is a test.", (0,), (8, 8)),
],
)
def test_duplicate_ngram_chr_fraction(
text: str,
duplicate_ngram: List[float],
ngram_range: Tuple[int, int],
nlp: spacy.Language,
):
"""Test the duplicate_ngram_fraction function."""
doc = nlp(text)
d = duplicate_ngram_fraction(doc, ngram_range=ngram_range)
assert d, "duplicate_ngram_fraction should not be empty"
for i, j in zip(d, duplicate_ngram): # , strict=True): # for python >3.10
assert abs(d[i] - j) < 0.01
# test top ngram chr fraction
@pytest.mark.parametrize(
"text, top_ngram_chr_frac, ngram_range",
[
("", (0, 0), (2, 3)),
("This is a test.", (0.466, 0.6), (2, 3)),
("This is a test. This is a monkey", (0.437, 0.562, 0.437), (2, 4)),
(
"This is a test. This is a monkey. This is a star.",
(0.428, 0.551, 0.449),
(2, 4),
),
],
)
def test_top_ngram_chr_fraction(
text: str,
top_ngram_chr_frac: List[float],
ngram_range: Tuple[int, int],
nlp: spacy.Language,
):
"""Test the top_ngram_chr_fraction function."""
doc = nlp(text)
top_ngram_fractions = top_ngram_chr_fraction(doc, ngram_range=ngram_range)
for i, j in zip(top_ngram_fractions.values(), top_ngram_chr_frac):
assert abs(i - j) < 0.01
def test_quality_component(nlp: spacy.Language):
"""Test the quality component."""
nlp.add_pipe("textdescriptives/quality", config={"force": True})
doc = nlp("This is a test. This is a test. This is a test.")
quality = doc._.quality
assert quality.n_stop_words == 9
assert quality.mean_word_length == 2.4
assert quality.alpha_ratio == 0.8
assert quality.proportion_bullet_points == 0
assert quality.proportion_ellipsis == 0
assert quality.symbol_to_word_ratio["#"] == 0
assert quality.duplicate_ngram_chr_fraction["5"] == 1
assert abs(quality.top_ngram_chr_fraction["2"].value - 0.44) < 0.01
assert doc._.passed_quality_check is False
assert quality.oov_ratio.value is None
assert quality.passed is False
def test_quality_component_with_config(nlp: spacy.Language):
"""Test the quality component with config."""
quality_thresholds = td.QualityThresholds(
n_stop_words=(3, None),
alpha_ratio=(None, 0.8),
mean_word_length=(1, 10),
doc_length=(10, 100_000),
symbol_to_word_ratio={".": (None, 0.3)},
proportion_ellipsis=(None, 0.3),
proportion_bullet_points=(None, 0.8),
duplicate_line_chr_fraction=(None, 0.2),
duplicate_paragraph_chr_fraction=(None, 0.2),
top_ngram_chr_fraction={2: (None, 0.6), 3: (None, 0.6)},
duplicate_ngram_chr_fraction={},
contains={"lorem ipsum": False},
oov_ratio=(None, 0.3),
)
quality_pipe = nlp.add_pipe(
"textdescriptives/quality",
config={
"symbols": ["."],
"force": True,
},
)
quality_pipe.set_quality_thresholds(quality_thresholds)
doc = nlp("This is a test. This is a test. This is a test.")
assert doc._.quality.n_stop_words == 9
assert doc._.quality.mean_word_length == 2.4
assert doc._.quality.alpha_ratio == 0.8
assert doc._.quality.proportion_bullet_points == 0
assert doc._.quality.proportion_ellipsis == 0
assert doc._.quality.symbol_to_word_ratio["."] == 0.25
assert doc._.quality.duplicate_ngram_chr_fraction["5"] == 1
assert doc._.quality.duplicate_ngram_chr_fraction["8"] == 1
assert abs(doc._.quality.top_ngram_chr_fraction["3"].value - 0.57) < 0.01
assert doc._.passed_quality_check is True
assert doc._.quality.oov_ratio.value is None
@pytest.mark.parametrize(
"text, passed",
[
("", False),
(
"This is a reasonable text, which has a very good sentence structure and "
+ "will therefore pass the quality check.",
True,
),
(
"This is repitious text, This is repitious text, This is repitious text.",
False,
),
("This test has many symbols #!@#$%^&*()_+.", False),
("- this is a text of \n - bullet points", False),
("", False), # test that it handles empty strings
],
)
def test_passed_quality_check(text: str, passed: bool, nlp: spacy.Language):
"""Test the passed_quality_check attribute."""
nlp.add_pipe("textdescriptives/quality", config={"force": True})
doc = nlp(text)
assert doc._.passed_quality_check == passed
def test_quality_multi_process(nlp):
texts = [
"A couple of texts here, yeah yeah yeah.",
"This is a second text, no repetition what so ever.",
]
nlp.add_pipe("textdescriptives/quality", config={"force": True})
docs = nlp.pipe(texts, n_process=2)
for doc in docs:
assert doc._.quality
@pytest.mark.parametrize(
"text,expected,vocab",
[
("This is a test", 0, None),
("This is a nonwrod", 0.25, None),
("This is a test", 0, {"This", "is", "a", "test"}),
("This is a nonwrod", 0.25, {"This", "is", "a", "test"}),
],
)
def test_oov_ratio(text, expected, vocab):
"""Test the oov_ratio function."""
nlp = spacy.load("en_core_web_md")
doc = nlp(text)
assert oov_ratio(doc, vocab) == expected
def test_oov_ratio_small_model():
nlp = spacy.load("en_core_web_sm")
nlp.add_pipe("textdescriptives/quality")
doc = nlp("This is a test")
assert doc._.quality.oov_ratio.value is None
|
ffcde84ad1fd54b47f8d68485dc5e3ad96043250
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/operation.py
|
6529bddb19c584a45c02036fb309e5a79886052f
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,287
|
py
|
operation.py
|
from __future__ import annotations
import datetime
from dataclasses import dataclass, field
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .entity import Entity
from .onenote_operation import OnenoteOperation
from .operation_status import OperationStatus
from .entity import Entity
@dataclass
class Operation(Entity):
# The start time of the operation.
created_date_time: Optional[datetime.datetime] = None
# The time of the last action of the operation.
last_action_date_time: Optional[datetime.datetime] = None
# The OdataType property
odata_type: Optional[str] = None
# The current status of the operation: notStarted, running, completed, failed
status: Optional[OperationStatus] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> Operation:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: Operation
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
try:
mapping_value = parse_node.get_child_node("@odata.type").get_str_value()
except AttributeError:
mapping_value = None
if mapping_value and mapping_value.casefold() == "#microsoft.graph.onenoteOperation".casefold():
from .onenote_operation import OnenoteOperation
return OnenoteOperation()
return Operation()
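    # Illustrative: a payload carrying the discriminator
    #   {"@odata.type": "#microsoft.graph.onenoteOperation", ...}
    # deserializes to an OnenoteOperation via the factory above; payloads
    # without a recognized discriminator fall back to a plain Operation.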
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from .entity import Entity
from .onenote_operation import OnenoteOperation
from .operation_status import OperationStatus
fields: Dict[str, Callable[[Any], None]] = {
"createdDateTime": lambda n : setattr(self, 'created_date_time', n.get_datetime_value()),
"lastActionDateTime": lambda n : setattr(self, 'last_action_date_time', n.get_datetime_value()),
"status": lambda n : setattr(self, 'status', n.get_enum_value(OperationStatus)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
super().serialize(writer)
writer.write_datetime_value("createdDateTime", self.created_date_time)
writer.write_datetime_value("lastActionDateTime", self.last_action_date_time)
writer.write_enum_value("status", self.status)
|
e403b42397ce2716bfe0d42c31059b6c37016419
|
e90bf4b372da78ceec15282d060b48d18ba8d4e9
|
/supervisor/plugins/audio.py
|
558d5fa930436177637417ce50916fb7f9eab8c4
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/supervisor
|
67f2e1755ff5fbf7cf2084351e1c32c6995274e0
|
4838b280adafed0997f32e021274b531178386cd
|
refs/heads/main
| 2023-08-31T22:51:25.949277
| 2023-08-31T08:01:42
| 2023-08-31T08:01:42
| 84,926,758
| 928
| 477
|
Apache-2.0
| 2023-09-14T17:11:27
| 2017-03-14T08:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 7,585
|
py
|
audio.py
|
"""Home Assistant audio plugin.
Code: https://github.com/home-assistant/plugin-audio
"""
import asyncio
from contextlib import suppress
import logging
from pathlib import Path, PurePath
import shutil
from awesomeversion import AwesomeVersion
import jinja2
from ..const import LogLevel
from ..coresys import CoreSys
from ..docker.audio import DockerAudio
from ..docker.const import ContainerState
from ..docker.stats import DockerStats
from ..exceptions import (
AudioError,
AudioJobError,
AudioUpdateError,
ConfigurationFileError,
DockerError,
)
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from ..utils.json import write_json_file
from ..utils.sentry import capture_exception
from .base import PluginBase
from .const import (
FILE_HASSIO_AUDIO,
PLUGIN_UPDATE_CONDITIONS,
WATCHDOG_THROTTLE_MAX_CALLS,
WATCHDOG_THROTTLE_PERIOD,
)
from .validate import SCHEMA_AUDIO_CONFIG
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-member
PULSE_CLIENT_TMPL: Path = Path(__file__).parents[1].joinpath("data/pulse-client.tmpl")
ASOUND_TMPL: Path = Path(__file__).parents[1].joinpath("data/asound.tmpl")
# pylint: enable=no-member
class PluginAudio(PluginBase):
"""Home Assistant core object for handle audio."""
def __init__(self, coresys: CoreSys):
"""Initialize hass object."""
super().__init__(FILE_HASSIO_AUDIO, SCHEMA_AUDIO_CONFIG)
self.slug = "audio"
self.coresys: CoreSys = coresys
self.instance: DockerAudio = DockerAudio(coresys)
self.client_template: jinja2.Template | None = None
@property
def path_extern_pulse(self) -> PurePath:
"""Return path of pulse socket file."""
return self.sys_config.path_extern_audio.joinpath("external")
@property
def path_extern_asound(self) -> PurePath:
"""Return path of default asound config file."""
return self.sys_config.path_extern_audio.joinpath("asound")
@property
def pulse_audio_config(self) -> Path:
"""Return Path to pulse audio config file."""
return Path(self.sys_config.path_audio, "pulse_audio.json")
@property
def latest_version(self) -> AwesomeVersion | None:
"""Return latest version of Audio."""
return self.sys_updater.version_audio
async def load(self) -> None:
"""Load Audio setup."""
# Initialize Client Template
try:
self.client_template = jinja2.Template(
PULSE_CLIENT_TMPL.read_text(encoding="utf-8")
)
except OSError as err:
_LOGGER.error("Can't read pulse-client.tmpl: %s", err)
await super().load()
# Setup default asound config
asound = self.sys_config.path_audio.joinpath("asound")
if not asound.exists():
try:
shutil.copy(ASOUND_TMPL, asound)
except OSError as err:
_LOGGER.error("Can't create default asound: %s", err)
async def install(self) -> None:
"""Install Audio."""
_LOGGER.info("Setup Audio plugin")
while True:
# read audio tag and install it
if not self.latest_version:
await self.sys_updater.reload()
if self.latest_version:
with suppress(DockerError):
await self.instance.install(
self.latest_version, image=self.sys_updater.image_audio
)
break
_LOGGER.warning("Error on installing Audio plugin, retrying in 30sec")
await asyncio.sleep(30)
_LOGGER.info("Audio plugin now installed")
self.version = self.instance.version
self.image = self.sys_updater.image_audio
self.save_data()
@Job(
name="plugin_audio_update",
conditions=PLUGIN_UPDATE_CONDITIONS,
on_condition=AudioJobError,
)
async def update(self, version: str | None = None) -> None:
"""Update Audio plugin."""
version = version or self.latest_version
old_image = self.image
if version == self.version:
_LOGGER.warning("Version %s is already installed for Audio", version)
return
try:
await self.instance.update(version, image=self.sys_updater.image_audio)
except DockerError as err:
raise AudioUpdateError("Audio update failed", _LOGGER.error) from err
self.version = version
self.image = self.sys_updater.image_audio
self.save_data()
# Cleanup
with suppress(DockerError):
await self.instance.cleanup(old_image=old_image)
# Start Audio
await self.start()
async def restart(self) -> None:
"""Restart Audio plugin."""
_LOGGER.info("Restarting Audio plugin")
self._write_config()
try:
await self.instance.restart()
except DockerError as err:
raise AudioError("Can't start Audio plugin", _LOGGER.error) from err
async def start(self) -> None:
"""Run Audio plugin."""
_LOGGER.info("Starting Audio plugin")
self._write_config()
try:
await self.instance.run()
except DockerError as err:
raise AudioError("Can't start Audio plugin", _LOGGER.error) from err
async def stop(self) -> None:
"""Stop Audio plugin."""
_LOGGER.info("Stopping Audio plugin")
try:
await self.instance.stop()
except DockerError as err:
raise AudioError("Can't stop Audio plugin", _LOGGER.error) from err
async def stats(self) -> DockerStats:
"""Return stats of Audio plugin."""
try:
return await self.instance.stats()
except DockerError as err:
raise AudioError() from err
async def repair(self) -> None:
"""Repair Audio plugin."""
if await self.instance.exists():
return
_LOGGER.info("Repairing Audio %s", self.version)
try:
await self.instance.install(self.version)
except DockerError as err:
_LOGGER.error("Repair of Audio failed")
capture_exception(err)
def pulse_client(self, input_profile=None, output_profile=None) -> str:
"""Generate an /etc/pulse/client.conf data."""
if self.client_template is None:
return ""
# Process Template
return self.client_template.render(
audio_address=self.sys_docker.network.audio,
default_source=input_profile,
default_sink=output_profile,
)
def _write_config(self):
"""Write pulse audio config."""
try:
write_json_file(
self.pulse_audio_config,
{
"debug": self.sys_config.logging == LogLevel.DEBUG,
},
)
except ConfigurationFileError as err:
raise AudioError(
f"Can't update pulse audio config: {err}", _LOGGER.error
) from err
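    # Illustrative: with default logging this writes {"debug": false} to
    # pulse_audio.json; setting the supervisor log level to LogLevel.DEBUG
    # flips it to {"debug": true} on the next start/restart.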
@Job(
name="plugin_audio_restart_after_problem",
limit=JobExecutionLimit.THROTTLE_RATE_LIMIT,
throttle_period=WATCHDOG_THROTTLE_PERIOD,
throttle_max_calls=WATCHDOG_THROTTLE_MAX_CALLS,
on_condition=AudioJobError,
)
async def _restart_after_problem(self, state: ContainerState):
"""Restart unhealthy or failed plugin."""
return await super()._restart_after_problem(state)
|
30436dce7ef17d2ca5c349a22e648f4f081c0a0c
|
72293b4650b92019f9c046133f7de13ea6f69644
|
/zulip/integrations/codebase/zulip_codebase_config.py
|
5ea2ed2cb5ba28c0532e79709a2fe1cc36d5b024
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/python-zulip-api
|
820978c36689db4872abf21730b25ce8abb5fbcf
|
35a8ff8839ac39cff0638f533fea59665cb9aff3
|
refs/heads/main
| 2023-09-03T14:04:46.920347
| 2023-06-12T21:03:10
| 2023-08-11T19:36:11
| 96,455,158
| 387
| 437
|
Apache-2.0
| 2023-08-11T19:36:12
| 2017-07-06T17:25:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
zulip_codebase_config.py
|
from typing import Optional
# Change these values to configure authentication for your codebase account
# Note that this is the Codebase API Username, found in the Settings page
# for your account
CODEBASE_API_USERNAME = "foo@example.com"
CODEBASE_API_KEY = "1234561234567abcdef"
# The URL of your codebase setup
CODEBASE_ROOT_URL = "https://YOUR_COMPANY.codebasehq.com"
# When initially started, how many hours of messages to include.
# Note that the Codebase API only returns the 20 latest events,
# if you have more than 20 events that fit within this window,
# earlier ones may be lost
CODEBASE_INITIAL_HISTORY_HOURS = 12
# Change these values to configure Zulip authentication for the plugin
ZULIP_USER = "codebase-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# The streams to send commit information and ticket information to
ZULIP_COMMITS_STREAM_NAME = "codebase"
ZULIP_TICKETS_STREAM_NAME = "tickets"
# If properly installed, the Zulip API should be in your import
# path, but if not, set a custom path below
ZULIP_API_PATH: Optional[str] = None
# Set this to your Zulip API server URI
ZULIP_SITE = "https://zulip.example.com"
# If you wish to log to a file rather than stdout/stderr,
# please fill this out with your desired path
LOG_FILE: Optional[str] = None
# This file is used to resume this mirror in case the script shuts down.
# It is required and needs to be writeable.
RESUME_FILE = "/var/tmp/zulip_codebase.state"
|
d09adc0f93ce3f93846622563c5e1858ea5a1181
|
38fff7bdefd8d62a740d51329b50d0e1e49258bb
|
/infra/constants.py
|
e085700c9b2523e156a69dfb23a4e87e308be684
|
[
"Apache-2.0"
] |
permissive
|
google/oss-fuzz
|
026384c2ada61ef68b147548e830f60730c5e738
|
f0275421f84b8f80ee767fb9230134ac97cb687b
|
refs/heads/master
| 2023-08-31T23:30:28.157702
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 63,809,205
| 9,438
| 2,315
|
Apache-2.0
| 2023-09-14T20:32:19
| 2016-07-20T19:39:50
|
Shell
|
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
constants.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Constants for OSS-Fuzz."""
DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH = '.clusterfuzzlite'
DEFAULT_LANGUAGE = 'c++'
DEFAULT_SANITIZER = 'address'
DEFAULT_ARCHITECTURE = 'x86_64'
DEFAULT_ENGINE = 'libfuzzer'
LANGUAGES = [
'c',
'c++',
'go',
'javascript',
'jvm',
'python',
'rust',
'swift',
]
LANGUAGES_WITH_COVERAGE_SUPPORT = [
'c', 'c++', 'go', 'jvm', 'python', 'rust', 'swift', 'javascript'
]
SANITIZERS = [
'address',
'none',
'memory',
'undefined',
'thread',
'coverage',
'introspector',
'hwaddress',
]
ARCHITECTURES = ['i386', 'x86_64', 'aarch64']
ENGINES = ['libfuzzer', 'afl', 'honggfuzz', 'centipede', 'none', 'wycheproof']
|
9c625a2af2462e7b175fb241a04574f0e1d7facf
|
bb666509f2a6cf07058352f8df63cb58e46a0401
|
/translate.py
|
79f7e464f4e8ec617b2f79f9640f34e6c03cbf9f
|
[
"MIT"
] |
permissive
|
gregzaal/Auto-Voice-Channels
|
f6d1764846ee5e17f98b413e1cbfab8c7a71b05a
|
17698450c6f885ae3ee3dae059754ba72931adf6
|
refs/heads/master
| 2023-05-25T10:10:38.320456
| 2022-10-08T09:09:44
| 2022-10-08T09:09:44
| 237,802,209
| 208
| 189
|
MIT
| 2023-05-22T22:41:04
| 2020-02-02T16:44:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,643
|
py
|
translate.py
|
import unicodedata
from utils import match_case
mc = match_case
digits = {
"0": "Zero",
"1": "One",
"2": "Two",
"3": "Three",
"4": "Four",
"5": "Five",
"6": "Six",
"7": "Seven",
"8": "Eight",
"9": "Nine",
}
def uwu(s):
"""Sources:
https://github.com/QuazzyWazzy/UwU-fy
https://github.com/mackyclemen/uwu-cpp
https://github.com/mackyclemen/uwu-android
"""
# Rs & Ls to Ws
tmp = ""
for i, c in enumerate(s):
if c.lower() in ["r", "l"]:
c = mc("w", c)
tmp += c
s = tmp
# Japanize?
trans = [
[["wove"], "wuv"],
[["wewcome"], "youkoso"],
[["is that so"], "naruhodo"],
[["that's why", "thats why"], "dakara"],
[["what"], "nani"],
[["when"], "itsu"],
[["the"], "za"],
[["wowwd"], "warudo"],
[["good morning"], "ohayou"],
[["good aftewnoon"], "konnichiwa"],
[["good night"], "oyasumi"],
[["thank you", "thanks", "tnx", "ty"], "arigatou"],
[["bye bye", "bye", "goodbye"], "sayonara"],
[["sowwy", "apowogies"], "gomenasai"],
[["my bad"], "warukatta"],
[["excuse me"], "sumimasen"],
[["wife"], "waifu"],
[["husband"], "hasubando"],
[["cute"], "kawaii"],
[["nice"], "suteki"],
[["bad"], "warui"],
[["coow"], "sugoi"],
[["handsome"], "kakkoi"],
[["idiot"], "baka"],
[["dumb", "dumbass"], "aho"],
[["undewstand"], "wakarimasu"],
[["undewstood"], "wakatta"],
[["dead"], "deddo"],
[["shut up", "shut the fuck up", "stfu", "shutup", "shattap"], "urusai"],
[["fuck"], "fack"],
[["fucked"], "facked"],
[["fucking"], "facking"],
[["this is bad", "oh no", "that's bad", "thats bad"], "yabai"],
[["name"], "namae"],
[["sistew"], "oneesan"],
[["bwothew"], "oniichan"],
[["wittwe sistew"], "imouto"],
[["wittwe bwothew"], "otouto"],
[["fwiends"], "fwends"],
[["fwiend"], "fwend"],
[["awwy", "awwies", "comnrade", "comrades"], "nakama"],
[["enemy", "enemies", "nemesis"], "teki"],
[["see you", "see ya"], "mata ne"],
[["good wuck", "goodwuck", "do your best", "you got this"], "ganbatte"],
[["dog"], "inu"],
[["cat"], "neko"],
[["with"], "wid"],
[["without"], "widout"],
]
trans_d = {}
for t in trans:
for w in t[0]:
trans_d[w] = t[1]
s = " ".join([mc(trans_d[w.lower()], w) if w.lower() in trans_d else w for w in s.split(" ")])
replacements = {
"no": "nyo",
"mo": "myo",
"No": "Nyo",
"Mo": "Myo",
"NO": "NYO",
"MO": "MYO",
"hu": "hoo",
"Hu": "Hoo",
"HU": "HOO",
"th": "d",
"Th": "D",
"TH": "D",
}
for r, rv in replacements.items():
s = s.replace(r, rv)
return s
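# Illustrative: uwu("hello world") -> "hewwo warudo" ("l"/"r" become "w" first,
# then the whole word "wowwd" is swapped via the translation table above).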
def small_caps(s):
new_s = ""
for c in s:
if c.islower():
try:
new_s += unicodedata.lookup("LATIN LETTER SMALL CAPITAL " + c)
except KeyError:
new_s += c
else:
new_s += c
return new_s
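# Illustrative: small_caps only converts lowercase letters for which unicodedata
# has a "LATIN LETTER SMALL CAPITAL ..." codepoint; anything else (uppercase,
# digits, letters without such a codepoint) passes through via the KeyError fallback.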
def mathematical_unicode(mode, s):
new_s = ""
for c in s:
nc = c
case = "Capital"
if c.islower():
case = "Small"
try:
int(c)
except ValueError:
pass
else:
case = "Digit"
nc = digits[c]
try:
new_s += unicodedata.lookup("Mathematical {} {} {}".format(mode, case, nc))
except KeyError:
new_s += c
return new_s
def bold(s):
return mathematical_unicode("Bold", s)
def italic(s):
return mathematical_unicode("Italic", s)
def bolditalic(s):
return mathematical_unicode("Bold Italic", s)
def script(s):
return mathematical_unicode("Script", s)
def boldscript(s):
return mathematical_unicode("Bold Script", s)
def fraktur(s):
return mathematical_unicode("Fraktur", s)
def boldfraktur(s):
return mathematical_unicode("Bold Fraktur", s)
def double(s):
return mathematical_unicode("Double-Struck", s)
def sans(s):
return mathematical_unicode("Sans-Serif", s)
def boldsans(s):
return mathematical_unicode("Sans-Serif Bold", s)
def italicsans(s):
return mathematical_unicode("Sans-Serif Italic", s)
def bolditalicsans(s):
return mathematical_unicode("Sans-Serif Bold Italic", s)
def mono(s):
return mathematical_unicode("Monospace", s)
|
f1de7bd8f4c6b96b0d96e13b07c1ed653edc7a47
|
c82aceed27b19578708f1aa1497a0a2e7268e891
|
/modules/api/src/test/functional/tests/test_data.py
|
f1f545caea530e8311a661500fac217ed2080479
|
[
"MPL-2.0",
"EPL-1.0",
"Apache-2.0"
] |
permissive
|
vinyldns/vinyldns
|
441ba87943db67aba61806f47d85de5c5380dd99
|
ec54b1d533f744fc7777aa747b6ad4f1c46d0c3e
|
refs/heads/master
| 2023-08-31T04:17:28.072961
| 2023-08-22T14:18:30
| 2023-08-22T14:18:30
| 142,474,323
| 353
| 134
|
Apache-2.0
| 2023-09-12T20:26:47
| 2018-07-26T17:43:18
|
Scala
|
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
test_data.py
|
class TestData:
A = {
"zoneId": None,
"name": "test-create-a-ok",
"type": "A",
"ttl": 100,
"account": "foo",
"records": [
{
"address": "10.1.1.1"
},
{
"address": "10.2.2.2"
}
]
}
AAAA = {
"zoneId": None,
"name": "test-create-aaaa-ok",
"type": "AAAA",
"ttl": 100,
"account": "foo",
"records": [
{
"address": "2001:db8:0:0:0:0:0:3"
},
{
"address": "2002:db8:0:0:0:0:0:3"
}
]
}
CNAME = {
"zoneId": None,
"name": "test-create-cname-ok",
"type": "CNAME",
"ttl": 100,
"account": "foo",
"records": [
{
"cname": "cname."
}
]
}
MX = {
"zoneId": None,
"name": "test-create-mx-ok",
"type": "MX",
"ttl": 100,
"account": "foo",
"records": [
{
"preference": 100,
"exchange": "exchange."
}
]
}
PTR = {
"zoneId": None,
"name": "10.20",
"type": "PTR",
"ttl": 100,
"account": "foo",
"records": [
{
"ptrdname": "ptr."
}
]
}
SPF = {
"zoneId": None,
"name": "test-create-spf-ok",
"type": "SPF",
"ttl": 100,
"account": "foo",
"records": [
{
"text": "spf."
}
]
}
SRV = {
"zoneId": None,
"name": "test-create-srv-ok",
"type": "SRV",
"ttl": 100,
"account": "foo",
"records": [
{
"priority": 1,
"weight": 2,
"port": 8000,
"target": "srv."
}
]
}
SSHFP = {
"zoneId": None,
"name": "test-create-sshfp-ok",
"type": "SSHFP",
"ttl": 100,
"account": "foo",
"records": [
{
"algorithm": 1,
"type": 1,
"fingerprint": "123456789ABCDEF67890123456789ABCDEF67890"
}
]
}
TXT = {
"zoneId": None,
"name": "test-create-txt-ok",
"type": "TXT",
"ttl": 100,
"account": "foo",
"records": [
{
"text": "some text"
}
]
}
RECORDS = [("A", A), ("AAAA", AAAA), ("CNAME", CNAME), ("MX", MX), ("PTR", PTR), ("SPF", SPF), ("SRV", SRV), ("SSHFP", SSHFP), ("TXT", TXT)]
FORWARD_RECORDS = [("A", A), ("AAAA", AAAA), ("CNAME", CNAME), ("MX", MX), ("SPF", SPF), ("SRV", SRV), ("SSHFP", SSHFP), ("TXT", TXT)]
REVERSE_RECORDS = [("CNAME", CNAME), ("PTR", PTR), ("TXT", TXT)]
|
e9c9fe1e0bd33f79a8adddc0a408572c5326fdc9
|
0576c3c80fb2c915c7bf50ca3c6276dd6f817ebe
|
/parliament/statement.py
|
dbf8766dd39d222609585f97ffa9ae938ad62632
|
[
"BSD-3-Clause"
] |
permissive
|
duo-labs/parliament
|
0852b6ea1b8f2f5340116513b201c28ad6edbdea
|
57f45649f512df3302d4185ecf1ec25226006336
|
refs/heads/main
| 2023-08-22T10:02:16.217541
| 2023-07-06T19:38:00
| 2023-07-06T19:38:00
| 208,323,795
| 981
| 98
|
BSD-3-Clause
| 2023-08-07T18:56:02
| 2019-09-13T18:30:15
|
Python
|
UTF-8
|
Python
| false
| false
| 40,728
|
py
|
statement.py
|
import json
import jsoncfg
import re
from . import (
iam_definition,
is_arn_match,
is_arn_strictly_valid,
expand_action,
UnknownActionException,
UnknownPrefixException,
)
from .finding import Finding
from .misc import make_list
def is_condition_key_match(document_key, str):
"""Given a documented condition key and one from a policy, determine if they match
Examples:
- s3:prefix and s3:prefix obviously match
- s3:ExistingObjectTag/<key> and s3:ExistingObjectTag/backup match
"""
# Normalize both
document_key = document_key.lower()
str = str.lower()
# Check if the document key has a pattern match in it
if "$" in document_key:
# Some services use a format like license-manager:ResourceTag/${TagKey}
if str.startswith(document_key.split("$")[0]):
return True
elif "<" in document_key:
# Some services use a format like s3:ExistingObjectTag/<key>
if str.startswith(document_key.split("<")[0]):
return True
elif "tag-key" in document_key:
# Some services use a format like secretsmanager:ResourceTag/tag-key
if str.startswith(document_key.split("tag-key")[0]):
return True
# Just return whether they match exactly
return document_key == str
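# Illustrative results for the matcher above:
#   is_condition_key_match("s3:prefix", "S3:Prefix") -> True (exact, case-insensitive)
#   is_condition_key_match("s3:ExistingObjectTag/<key>", "s3:existingobjecttag/backup") -> True
#   is_condition_key_match("license-manager:ResourceTag/${TagKey}", "license-manager:resourcetag/env") -> True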
def get_privilege_info(service, action):
"""
Given a service, like "s3"
and an action, like "ListBucket"
    return the info from the docs about that action, along with the service's resource and condition definitions
"""
for service_info in iam_definition:
if service_info["prefix"] == service:
for privilege_info in service_info["privileges"]:
if privilege_info["privilege"] == action:
privilege_info["service_resources"] = service_info["resources"]
privilege_info["service_conditions"] = service_info["conditions"]
return privilege_info
raise UnknownActionException("Unknown action {}:{}".format(service, action))
def get_arn_format(resource_type, service_resources):
"""
Search through the service_resources for the given resource_type.
Example, a resource_type of "object*", given the S3 resources,
should return "arn:.*?:s3:::.*?/.*?"
Raises an exception if the resource_type cannot be found.
"""
# Get rid of the asterisk
resource_type = resource_type.replace("*", "")
# Search through the resource service resources for this resource type
for resource_definition in service_resources:
if resource_type == resource_definition["resource"]:
# The resource["arn"] looks like "arn:${Partition}:s3:::${BucketName}/${ObjectName}"
# We need it to look like "arn:.*?:s3:::.*?/.*?" for matching
# This does a minimal (non-greedy) match
arn_format = re.sub(r"\$\{.*?\}", "*", resource_definition["arn"])
# Get the compiled regex
return arn_format
raise Exception(
"Could not find the resource type {} in the service definition. {}".format(
resource_type, service_resources
)
)
def is_valid_region(str):
region_regex = re.compile("^([a-z]{2}|us-gov)-[a-z]+-[0-9]$")
if str == "" or str == "*" or region_regex.match(str):
return True
return False
def is_valid_account_id(str):
# TODO I may want to check for common place holder values for account ids,
# such as 000000000000 and 123456789012
account_id_regex = re.compile("^[0-9]{12}$")
if str == "" or str == "*" or account_id_regex.match(str):
return True
return False
OPERATORS = {
# String
"StringEquals": "String",
"StringNotEquals": "String",
"StringEqualsIgnoreCase": "String",
"StringNotEqualsIgnoreCase": "String",
"StringLike": "String",
"StringNotLike": "String",
# Number
"NumericEquals": "Number",
"NumericNotEquals": "Number",
"NumericLessThan": "Number",
"NumericLessThanEquals": "Number",
"NumericGreaterThan": "Number",
"NumericGreaterThanEquals": "Number",
# Date
"DateEquals": "Date",
"DateNotEquals": "Date",
"DateLessThan": "Date",
"DateLessThanEquals": "Date",
"DateGreaterThan": "Date",
"DateGreaterThanEquals": "Date",
# Bool
"Bool": "Bool",
"Null": "Bool",
# Binary
"BinaryEquals": "Binary",
# Ip
"IpAddress": "Ip",
"NotIpAddress": "Ip",
# Arn
"ArnEquals": "Arn",
"ArnLike": "Arn",
"ArnNotEquals": "Arn",
"ArnNotLike": "Arn",
}
GLOBAL_CONDITION_KEYS = {
"aws:CalledVia": "String",
"aws:CalledViaFirst": "String",
"aws:CalledViaLast": "String",
"aws:CurrentTime": "Date",
"aws:EpochTime": "Date", # This needs to accept Date or Numeric
"aws:FederatedProvider": "String",
"aws:MultiFactorAuthAge": "Numeric",
"aws:MultiFactorAuthPresent": "Bool",
"aws:PrincipalAccount": "String",
"aws:PrincipalOrgID": "String",
"aws:PrincipalArn": "Arn",
"aws:PrincipalIsAWSService": "Bool",
"aws:PrincipalOrgPaths": "String",
"aws:PrincipalServiceName": "String",
"aws:PrincipalServiceNamesList": "String",
"aws:PrincipalTag": "String",
"aws:PrincipalType": "String",
"aws:RequestedRegion": "String",
"aws:SecureTransport": "Bool",
"aws:UserAgent": "String",
# Keys Available for Some Services
"aws:PrincipalTag/*": "String",
"aws:PrincipalType": "String",
"aws:Referer": "String",
"aws:RequestedRegion": "String",
"aws:RequestTag/*": "String",
"aws:ResourceAccount": "String",
"aws:ResourceOrgID": "String",
"aws:ResourceOrgPaths": "String",
"aws:ResourceTag/*": "String",
"aws:SecureTransport": "Bool",
"aws:SourceAccount": "String",
"aws:SourceArn": "Arn",
"aws:SourceIdentity": "String",
"aws:SourceIp": "Ip",
"aws:SourceVpc": "String",
"aws:SourceVpce": "String",
"aws:TagKeys": "String",
"aws:TokenIssueTime": "Date",
"aws:userid": "String",
"aws:username": "String",
"aws:VpcSourceIp": "Ip",
}
def get_global_key_type(str):
"""
Given a global key, return it's type, or None if not found.
Examples:
"aws:CurrentTime" -> "Date"
"aws:currenttime" -> "Date"
"test" -> None
"""
str = str.lower()
for key in GLOBAL_CONDITION_KEYS:
if "*" in key:
if str.startswith(key.split("*")[0].lower()):
return GLOBAL_CONDITION_KEYS[key]
elif str == key.lower():
return GLOBAL_CONDITION_KEYS[key]
return None
def is_value_in_correct_format_for_type(type_needed, values):
"""
Given a documented type needed such as "Arn", return True if all values match.
For example, if you have a condition of:
"Condition": {"DateGreaterThan" :{"aws:CurrentTime" : "2019-07-16T12:00:00Z"}}
Then this function would end up being called with:
- type_needed: Date
- values: ["2019-07-16T12:00:00Z"]
This would return True.
"""
type_needed = translate_documentation_types(type_needed)
    regex_patterns = {
        "Arn": "^arn:.*:.*:.*:.*:.*$",
        # Binary is a base64 encoded value, like "QmluYXJ5VmFsdWVJbkJhc2U2NA=="
        "Binary": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
        "Bool": "^(true)|(false)$",
        "Date": r"^\d{4}-\d{2}-\d{2}T\d\d:\d\d:\d\dZ$",
        # Ip is either IPv4 or IPv6 (ex. 203.0.113.0/24 or 2001:DB8:1234:5678::/64)
        # and may not have a range specified (ex. /32)
        "Ip": r"^(\d+.\d+.\d+.\d+(/\d+)?)|\d*:\d*:\d*:\d*:\d*:\d*(/\d+)?$",
        "Number": r"^\d+$",
        "String": ".*",  # Strings can be anything
    }
for value in values:
# Convert the value to a string, which is need for Bool
# The json file should contain true or false, but due to being converted to a Python structure
# this will be capitalized as True or False.
if isinstance(value, bool):
value = "{}".format(value).lower()
if type_needed not in regex_patterns:
raise Exception("Unknown type: {}".format(type_needed))
regex = re.compile(regex_patterns[type_needed])
if not regex.match(value):
return False
return True
def translate_documentation_types(str):
"""
The docs use different type names, so this standardizes them.
Example: The condition secretsmanager:RecoveryWindowInDays is listed as using a "Long"
So return "Number"
"""
if str in ["Arn", "ARN"]:
return "Arn"
elif str in ["Bool", "Boolean"]:
return "Bool"
elif str in ["Date"]:
return "Date"
elif str in ["Long", "Numeric"]:
return "Number"
elif str in ["String", "string", "ArrayOfString"]:
return "String"
elif str in ["Ip"]:
return "Ip"
else:
raise Exception("Unknown data format: {}".format(str))
class Statement:
findings = []
resource_star = {}
effect_allow = True
stmt = None
sid = None
policy_id = None
_is_valid = True
def set_policy_identifier(self, policy_id):
self.policy_id = policy_id
def __init__(self, stmt, analyze=True):
self.findings = []
self.resource_star = {}
self.stmt = stmt
if analyze:
if not self.analyze_statement():
# Statement is malformed
self._is_valid = False
return
def __str__(self):
return json.dumps(self.stmt, indent=2)
@property
def is_valid(self):
return self._is_valid
def in_actions(self, privilege_prefix, privilege_name):
"""
Given "s3" "GetObject", determine if the privilege is in this statement.
This could happen either because the Action is ["s3:GetObject"] or ["s3:*", "ec2:*"]
or because the action is not in the NotAction. For example, if we have an Allow on NotAction "ec2:*",
then this, with "s3" "GetObject" returns True.
"""
if "Action" in self.stmt:
for action in make_list(self.stmt["Action"]):
if action.value == "*" or action.value == "*:*":
return True
expanded_actions = expand_action(action.value, raise_exceptions=False)
for action_struct in expanded_actions:
if (
action_struct["service"] == privilege_prefix
and action_struct["action"] == privilege_name
):
return True
return False
# Else, we're dealing with a NotAction
for action in make_list(self.stmt["NotAction"]):
if action == "*" or action == "*:*":
# I don't think it makes sense to have a "NotAction" of "*", but I'm including this check anyway.
return False
for action_struct in expand_action(action.value, raise_exceptions=False):
if (
action_struct["service"] == privilege_prefix
and action_struct["action"] == privilege_name
):
return False
return True
def get_resources_for_privilege(self, privilege_prefix, privilege_name):
"""
If this privilege is allowed or denied by this statement, return the relevant resources.
Else return None.
For example, if the statement has Actions 's3:*', and resources
["arn:aws:s3:::bucket", "arn:aws:s3:::bucket/*"]
and the privilege given is 's3:PutBucketPolicy' this should return ["arn:aws:s3:::bucket"],
as the other resource is only applicable to object related privileges.
If the privilege given is 's3:ListAllMyBuckets' this should return None, as that privilege does not
apply to these resources.
"""
if not self.in_actions(privilege_prefix, privilege_name):
# Given privilege is unrelated to this statement
return []
if "NotResource" in self.stmt:
# TODO How should I handle NotResource?
return ["*"]
affected_resources = []
privilege_info = get_privilege_info(privilege_prefix, privilege_name)
# Iterate through the resources defined in the action definition
for resource_type in privilege_info["resource_types"]:
resource_type = resource_type["resource_type"]
# Only check the required resources which have a "*" at the end
if "*" not in resource_type:
continue
arn_format = get_arn_format(
resource_type, privilege_info["service_resources"]
)
# At least one resource has to match the action's required resources
for resource in make_list(self.stmt["Resource"]):
if is_arn_match(resource_type, arn_format, resource.value):
affected_resources.append(resource.value)
elif resource.value == "*":
affected_resources.append(resource.value)
# Ensure we match on "*"
for resource in make_list(self.stmt["Resource"]):
if resource.value == "*":
affected_resources.append(resource.value)
return affected_resources
def add_finding(self, finding, detail=None, location={}):
"""
Add finding to the class.
        finding: String specifying the problem
location: Dictionary with information about where this problem is. Often set to:
{"location": "string"}
"""
if "jsoncfg.config_classes.ConfigJSONObject" in str(type(location)):
node_location = jsoncfg.node_location(location)
location = {"line": node_location.line, "column": node_location.column}
elif "jsoncfg.config_classes.ConfigJSONScalar" in str(type(location)):
node_location = jsoncfg.node_location(location)
location = {"string": location.value}
location["line"] = node_location.line
location["column"] = node_location.column
elif "tuple" in str(type(location)):
node_location = jsoncfg.node_location(location[1])
location = {"string": location[0]}
location["line"] = node_location.line
location["column"] = node_location.column
elif "jsoncfg.config_classes.ConfigJSONScalar" in str(
type(location.get("string", ""))
):
node_location = jsoncfg.node_location(location["string"])
location["line"] = node_location.line
location["column"] = node_location.column
location["string"] = location["string"].value
elif "tuple" in str(type(location.get("string", ""))):
node_location = jsoncfg.node_location(location["string"][1])
location["line"] = node_location.line
location["column"] = node_location.column
location["string"] = location["string"][0]
elif "jsoncfg.config_classes" in str(type(location.get("string", ""))):
location["string"] = location["string"][0]
self.findings.append(Finding(finding, detail, location))
def _check_principal(self, principal_element):
"""
Checks that the Principal (or NotPrincipal) element conforms to expectations
"""
for principal in make_list(principal_element):
if jsoncfg.node_is_scalar(principal):
if principal.value == "*":
continue
else:
self.add_finding("UNKNOWN_PRINCIPAL", location=principal)
continue
# We have a ConfigJSONObject
for json_object in principal:
key = json_object[0]
if key == "AWS":
for aws_principal in make_list(json_object[1]):
text = aws_principal.value
account_id_regex = re.compile("^\d{12}$")
arn_regex = re.compile(
"^arn:[-a-z\*]*:iam::(\d{12}|cloudfront|):.*$"
)
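                        # Accepted principal forms (per the regexes above): "*",
                        # a bare 12-digit account id, or an IAM ARN such as
                        # "arn:aws:iam::123456789012:root"; cloudfront-style
                        # "arn:aws:iam::cloudfront:..." principals also match.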
if text == "*":
pass
elif account_id_regex.match(text):
pass
elif arn_regex.match(text):
pass
else:
self.add_finding(
"UNKNOWN_PRINCIPAL", location=principal, detail=text
)
elif key == "Federated":
for federation in make_list(json_object[1]):
federation = federation.value
                        saml_regex = re.compile(
                            r"^arn:[-a-z\*]*:iam::\d{12}:saml-provider/.*$"
                        )
if federation in [
"cognito-identity.amazonaws.com",
"www.amazon.com",
"graph.facebook.com",
"accounts.google.com",
]:
pass
elif saml_regex.match(federation):
pass
else:
self.add_finding(
"UNKNOWN_FEDERATION_SOURCE",
location=principal,
detail=federation,
)
elif key == "Service":
# This should be something like apigateway.amazonaws.com
# I don't know what all the restrictions could be though.
pass
else:
self.add_finding("UNKNOWN_PRINCIPAL", location=principal)
return True
def _check_condition(self, operator, condition_block, expanded_actions):
"""
operator is something like "StringLike"
condition_block is something like {"s3:prefix":["home/${aws:username}/*"]}
"""
operator_type_requirement = None
for documented_operator in OPERATORS:
op = documented_operator.lower()
if operator.lower() in [
op,
op + "ifexists",
"forallvalues:" + op,
"foranyvalue:" + op,
"forallvalues:" + op + "ifexists",
"foranyvalue:" + op + "ifexists",
]:
operator_type_requirement = OPERATORS[documented_operator]
break
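        # e.g. "ForAllValues:StringEqualsIfExists" normalizes to the documented
        # "StringEquals" operator above, so String-typed values are expected.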
if operator_type_requirement is None:
self.add_finding(
"UNKNOWN_OPERATOR",
detail=operator,
location=condition_block,
)
if operator_type_requirement == "Bool":
# Get the value that is being compared against
for c in condition_block:
value = str(c[1].value).lower()
if value != "true" and value != "false":
self.add_finding(
"MISMATCHED_TYPE_OPERATION_TO_NULL", location=condition_block
)
return False
for block in condition_block:
key = block[0]
values = []
for v in make_list(block[1]):
values.append(v.value)
# Check for known bad pattern
if operator.lower() == "bool":
if (
key.lower() == "aws:MultiFactorAuthPresent".lower()
and "false" in values
):
self.add_finding(
"BAD_PATTERN_FOR_MFA",
detail='The condition {"Bool": {"aws:MultiFactorAuthPresent":"false"}} is bad because aws:MultiFactorAuthPresent may not exist so it does not enforce MFA. You likely want to use a Deny with BoolIfExists.',
location=condition_block,
)
elif operator.lower() == "null":
if (
key.lower == "aws:MultiFactorAuthPresent".lower()
and "false" in values
):
self.add_finding(
"BAD_PATTERN_FOR_MFA",
detail='The condition {"Null": {"aws:MultiFactorAuthPresent":"false"}} is bad because aws:MultiFactorAuthPresent it does not enforce MFA, and only checks if the value exists. You likely want to use an Allow with {"Bool": {"aws:MultiFactorAuthPresent":"true"}}.',
location=condition_block,
)
if operator.lower() in ["null"]:
# The following condition is valid:
# "Condition": { "Null": { "aws:MultiFactorAuthAge": true }
# If we check further we'll get a MISMATCHED_TYPE finding due to
# aws:MultiFactorAuthAge being checked against a bool value instead of a date
continue
# The key here from the example is "s3:prefix"
condition_type = get_global_key_type(key)
if condition_type:
# This is a global key, like aws:CurrentTime
# Check if the values match the type (ex. must all be Date values)
if not is_value_in_correct_format_for_type(condition_type, values):
self.add_finding(
"MISMATCHED_TYPE",
detail="Type mismatch: {} requires a value of type {} but given {}".format(
key, condition_type, values
),
location=condition_block,
)
else:
# See if this is a service specific key
for action_struct in expanded_actions:
privilege_info = get_privilege_info(
action_struct["service"], action_struct["action"]
)
# Ensure the condition_key exists
match = None
for resource_type in privilege_info["resource_types"]:
for condition_key in resource_type["condition_keys"]:
if is_condition_key_match(condition_key, key):
match = condition_key
if match is None:
self.add_finding(
"UNKNOWN_CONDITION_FOR_ACTION",
detail="Unknown condition {} for action {}:{}".format(
key, action_struct["service"], action_struct["action"]
),
location=condition_block,
)
continue
condition_type = None
for condition in privilege_info["service_conditions"]:
if condition["condition"] == match:
condition_type = condition["type"]
if condition_type is None:
raise Exception(
"Action condition not found in service definition for {}".format(
match
)
)
if not is_value_in_correct_format_for_type(condition_type, values):
self.add_finding(
"MISMATCHED_TYPE",
detail="Type mismatch: {} requires a value of type {} but given {}".format(
key, condition_type, values
),
location=condition_block,
)
if condition_type is not None:
# if operator_type_requirement.lower() == 'string' and condition_type.lower() = 'arn':
# # Ignore these.
# pass
                        documentation_condition_type = translate_documentation_types(
                            condition_type
                        )
                        if operator_type_requirement != documentation_condition_type:
                            if (
                                operator_type_requirement == "String"
                                and documentation_condition_type == "Arn"
                            ):
self.add_finding(
"MISMATCHED_TYPE_BUT_USABLE",
detail="Type mismatch: {} requires a value of type {} but given {}".format(
operator,
operator_type_requirement,
translate_documentation_types(condition_type),
),
location=condition_block,
)
else:
self.add_finding(
"MISMATCHED_TYPE",
detail="Type mismatch: {} requires a value of type {} but given {}".format(
operator,
operator_type_requirement,
translate_documentation_types(condition_type),
),
location=condition_block,
)
return
def analyze_statement(self):
"""
Given a statement, look for problems and extract out the parts.
If it is malformed, return False
"""
actions = []
resources = []
conditions = []
# Check no unknown elements exist
for element in self.stmt:
if element[0] not in [
"Effect",
"Sid",
"Principal",
"NotPrincipal",
"Action",
"NotAction",
"Resource",
"NotResource",
"Condition",
]:
self.add_finding(
"MALFORMED",
detail="Statement contains an unknown element",
location=element,
)
return False
# Track Sid for better location reporting
if "Sid" in self.stmt:
self.sid = self.stmt["Sid"].value
# Check Principal if it exists. This only applicable to resource policies. Also applicable to
# IAM role trust policies, but those don't have Resource elements, so that would break other things
# if we tried to check those.
if "Principal" in self.stmt and "NotPrincipal" in self.stmt:
self.add_finding(
"MALFORMED",
detail="Statement contains both Principal and NotPrincipal",
location=self.stmt,
)
return False
if "Principal" in self.stmt:
self._check_principal(self.stmt["Principal"])
elif "NotPrincipal" in self.stmt:
self._check_principal(self.stmt["NotPrincipal"])
# Check Effect
if "Effect" not in self.stmt:
self.add_finding(
"MALFORMED",
detail="Statement does not contain an Effect element",
location=self.stmt,
)
return False
effect = self.stmt["Effect"]
if effect.value not in ["Allow", "Deny"]:
self.add_finding(
"MALFORMED",
detail="Unknown Effect used. Effect must be either Allow or Deny",
location=effect,
)
return False
if effect.value == "Allow":
self.effect_allow = True
else:
self.effect_allow = False
# Check Sid
if "Sid" in self.stmt and not re.fullmatch(
"[0-9A-Za-z]*", self.stmt["Sid"].value
):
# The grammar is defined at https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_grammar.html
self.add_finding("INVALID_SID", location={"string": self.stmt["Sid"]})
return False
# Check Action
if "Action" in self.stmt and "NotAction" in self.stmt:
self.add_finding(
"MALFORMED",
detail="Statement contains both Action and NotAction",
location=self.stmt,
)
return False
if "Action" in self.stmt:
actions = make_list(self.stmt["Action"])
elif "NotAction" in self.stmt:
actions = make_list(self.stmt["NotAction"])
else:
self.add_finding(
"MALFORMED",
detail="Statement contains neither Action nor NotAction",
location=self.stmt,
)
return False
# Check Resource exists and save the list of resources for later
if "Resource" in self.stmt and "NotResource" in self.stmt:
self.add_finding(
"MALFORMED",
detail="Statement contains both Resource and NotResource",
location=self.stmt,
)
return False
if "Resource" in self.stmt:
resources = make_list(self.stmt["Resource"])
elif "NotResource" in self.stmt:
resources = make_list(self.stmt["NotResource"])
else:
self.add_finding(
"MALFORMED",
detail="Statement contains neither Resource nor NotResource",
location=self.stmt,
)
return False
# Check if a Condition element exists and if so save them for later
if "Condition" in self.stmt:
conditions = make_list(self.stmt["Condition"])
if len(conditions) > 1:
self.add_finding(
"MALFORMED",
detail="Condition formatted incorrectly",
location=self.stmt,
)
return False
# Expand the actions from s3:Get* to s3:GetObject and others
expanded_actions = []
has_malformed_action = False
for action in actions:
# Handle special case where all actions are allowed
if action.value == "*" or action.value == "*:*":
# TODO Should ensure the resource is "*" with this action
continue
try:
# Given an action such as "s3:List*", return all the possible values it could have
expanded_actions.extend(expand_action(action.value))
except UnknownActionException as e:
self.add_finding(
"UNKNOWN_ACTION",
detail=str(e),
location=action,
)
has_malformed_action = True
continue
except UnknownPrefixException as e:
self.add_finding("UNKNOWN_PREFIX", detail=str(e), location=action)
has_malformed_action = True
continue
except Exception as e:
self.add_finding("EXCEPTION", detail=str(e), location=action)
return False
if has_malformed_action:
return False
        # Check the resources are formatted correctly
has_malformed_resource = False
for resource in resources:
if resource.value == "*":
continue
try:
parts = resource.value.split(":")
except AttributeError:
has_malformed_resource = True
self.add_finding(
"INVALID_ARN",
detail=(
"Must be a string. Maybe you're trying to analyze a "
"CloudFormation template which is outside of Parliament's "
"scope."
),
location=resource,
)
continue
if len(parts) < 6:
has_malformed_resource = True
self.add_finding(
"INVALID_ARN",
detail="Does not have 6 parts",
location=resource,
)
continue
elif parts[0] != "arn":
has_malformed_resource = True
self.add_finding(
"INVALID_ARN",
detail="Does not start with arn:",
location=resource,
)
continue
elif parts[1] not in ["aws", "aws-cn", "aws-us-gov", "aws-iso", "*", ""]:
has_malformed_resource = True
self.add_finding(
"INVALID_ARN",
detail="Unexpected partition",
location=resource,
)
continue
# The service is in parts[2]
elif not is_valid_region(parts[3]):
has_malformed_resource = True
self.add_finding(
"INVALID_ARN",
detail="Region expected to be of form like us-east-1",
location=resource,
)
elif not is_valid_account_id(parts[4]):
has_malformed_resource = True
self.add_finding(
"INVALID_ARN",
detail="Account expected to be of form like 123456789012",
location=resource,
)
# TODO I should check for the use of valid variables in the resource, such as ${aws:username}
# See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
# TODO Note, there are also service specific variables, such s3:prefix and sns:Endpoint
# These are case-insensitive
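            # For reference, a resource that passes every check above looks like
            # "arn:aws:ec2:us-east-1:123456789012:instance/i-12345" - six parts,
            # a known partition, a region like us-east-1, and a 12-digit account.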
# Before checking the resource more, ensure we don't have malformed resources.
# Also, looking for mismatches of Actions and Resources has different rules when a
# Deny is involved. For example, AWSEC2SpotServiceRolePolicy has a Deny on ec2:RunInstances
# but only lists a resource of "arn:aws:ec2:*:*:instance/*" instead of all the required
# resources.
# https://github.com/SummitRoute/aws_managed_policies/blob/4b71905a9042e66b22bc3d2b9cb1378b1e1d239e/policies/AWSEC2SpotServiceRolePolicy#L21
if not has_malformed_resource and self.effect_allow:
actions_without_matching_resources = []
# Ensure the required resources for each action exist
# Iterate through each action
all_possible_resources_for_stmt = []
for action_struct in expanded_actions:
privilege_info = get_privilege_info(
action_struct["service"], action_struct["action"]
)
# If the privilege requires a resource of "*", ensure it has it.
if len(privilege_info["resource_types"]) == 0 or (
len(privilege_info["resource_types"]) == 1
and privilege_info["resource_types"][0]["resource_type"] == ""
):
all_possible_resources_for_stmt.append("*")
match_found = False
for resource in resources:
if resource.value == "*":
match_found = True
if not match_found:
actions_without_matching_resources.append(
{
"action": "{}:{}".format(
action_struct["service"], action_struct["action"]
),
"required_format": "*",
}
)
# Iterate through the resources defined in the action definition
for resource_type in privilege_info["resource_types"]:
resource_type = resource_type["resource_type"]
# Only check the required resources which have a "*" at the end
if "*" not in resource_type:
continue
arn_format = get_arn_format(
resource_type, privilege_info["service_resources"]
)
all_possible_resources_for_stmt.append(arn_format)
# At least one resource has to match the action's required resources
match_found = False
for resource in resources:
if resource.value == "*":
                            # action expansion can yield duplicate actions
action_key = "{}:{}".format(
action_struct["service"], action_struct["action"]
)
self.resource_star.setdefault(action_key, 0)
self.resource_star[action_key] += 1
match_found = True
continue
if is_arn_strictly_valid(
resource_type, arn_format, resource.value
):
match_found = True
continue
if not match_found:
actions_without_matching_resources.append(
{
"action": "{}:{}".format(
action_struct["service"], action_struct["action"]
),
"required_format": arn_format,
}
)
if actions_without_matching_resources:
# We have location info for each action via the variable `actions` which is a ConfigJSONArray, but
# because we can only list one location, we'll just use the location of the statement
# because `actions_without_matching_resources` contains the name of each action and the required format.
self.add_finding(
"RESOURCE_MISMATCH",
detail=actions_without_matching_resources,
location=self.stmt,
)
# If the Statement is applied to specific wildcarded Resources,
# but those Resources span all possible Resources that the Actions
# can touch, then the Statement is effectively unbounded, even without
# an explicit "*"
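            # Illustrative only (resource formats invented for the example): if an
            # action's only possible resource formats were
            #   arn:*:s3:::*   and   arn:*:s3:::*/*
            # and the statement listed exactly those two patterns, it would be
            # as broad as Resource "*" without "*" ever appearing literally.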
specified_resources = {r.value for r in resources}
# Only handle the cases where "*" is not needed (some Actions can
            # only be scoped to "*") and it's not explicitly provided (already
# handled by RESOURCE_STAR). Otherwise, this check accidentally
# flags Actions that have - and need - Resource "*".
if (
"*" not in all_possible_resources_for_stmt
and "*" not in specified_resources
and specified_resources == set(all_possible_resources_for_stmt)
):
self.add_finding("RESOURCE_EFFECTIVELY_STAR", location=self.stmt)
        # If conditions exist, conditions is a one-element list: the raw Condition element was wrapped in a list earlier
if len(conditions) == 1:
# Iterate through each condition, of something like:
# - "StringLike": {"s3:prefix":["home/${aws:username}/*"]}
# - "DateGreaterThan" :{"aws:CurrentTime":"2019-07-16T12:00:00Z"}
for condition in conditions[0]:
# The operator is the first element (ex. `StringLike`) and the condition_block follows it
operator = condition[0]
condition_block = condition[1]
self._check_condition(operator, condition_block, expanded_actions)
# add the resource_star finding last
# after all offending actions from the expanded list have been identified
if self.resource_star:
self.add_finding(
"RESOURCE_STAR", detail=sorted(self.resource_star), location=self.stmt
)
return not has_malformed_resource
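# A minimal standalone sketch of the resource-format check above (hedged:
# get_privilege_info and is_arn_strictly_valid are this project's own helpers,
# so this uses a simplified fnmatch-based stand-in, not the real matcher):
#
#     import fnmatch
#     required = "arn:aws:ec2:*:*:instance/*"
#     good = "arn:aws:ec2:us-east-1:123456789012:instance/i-0abc"
#     assert fnmatch.fnmatch(good, required)
#     assert not fnmatch.fnmatch("arn:aws:s3:::mybucket", required)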
|
27f0e5ef9236ebce6bcf275c210edd440b315c08
|
411c40a6936ee32310014d26977e79fcc6c0092f
|
/setup.py
|
f1c3de5f0e095a5bb11389a8096ece2c6da37d82
|
[
"MIT"
] |
permissive
|
demotomohiro/remocolab
|
df37a4071639ee86f40cebb4f8c6ec0e2513950d
|
176445b2ca07cacf3a41036cf44bb38caf2c5049
|
refs/heads/master
| 2022-09-30T10:57:27.578971
| 2022-09-15T10:45:49
| 2022-09-15T10:45:49
| 199,016,083
| 338
| 284
|
MIT
| 2021-09-22T16:00:01
| 2019-07-26T12:51:56
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
setup.py
|
from setuptools import setup
setup(
    name="remocolab.py",
    version="0.1",
    py_modules=["remocolab"],
    url="https://github.com/demotomohiro/remocolab",
    author="demotomohiro",
    install_requires=["pyngrok"],
)
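# Usage sketch implied by this setup.py (the URL and module name are taken
# straight from the fields above; nothing else about the package is assumed):
#
#     pip install git+https://github.com/demotomohiro/remocolab.git
#     python -c "import remocolab"   # single module; pulls in pyngrok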
|
69f9c1a98b86ee6d3290f24250d2638021cd2dc0
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayMarketingCampaignTaskQueryModel.py
|
43f153756503002b2e85a232c0eed51a3373b628
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,710
|
py
|
AlipayMarketingCampaignTaskQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingCampaignTaskQueryModel(object):
def __init__(self):
self._open_id = None
self._task_cen_id = None
self._task_ids = None
self._user_id = None
@property
def open_id(self):
return self._open_id
@open_id.setter
def open_id(self, value):
self._open_id = value
@property
def task_cen_id(self):
return self._task_cen_id
@task_cen_id.setter
def task_cen_id(self, value):
self._task_cen_id = value
@property
def task_ids(self):
return self._task_ids
@task_ids.setter
def task_ids(self, value):
if isinstance(value, list):
self._task_ids = list()
for i in value:
self._task_ids.append(i)
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.open_id:
if hasattr(self.open_id, 'to_alipay_dict'):
params['open_id'] = self.open_id.to_alipay_dict()
else:
params['open_id'] = self.open_id
if self.task_cen_id:
if hasattr(self.task_cen_id, 'to_alipay_dict'):
params['task_cen_id'] = self.task_cen_id.to_alipay_dict()
else:
params['task_cen_id'] = self.task_cen_id
if self.task_ids:
if isinstance(self.task_ids, list):
for i in range(0, len(self.task_ids)):
element = self.task_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.task_ids[i] = element.to_alipay_dict()
if hasattr(self.task_ids, 'to_alipay_dict'):
params['task_ids'] = self.task_ids.to_alipay_dict()
else:
params['task_ids'] = self.task_ids
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingCampaignTaskQueryModel()
if 'open_id' in d:
o.open_id = d['open_id']
if 'task_cen_id' in d:
o.task_cen_id = d['task_cen_id']
if 'task_ids' in d:
o.task_ids = d['task_ids']
if 'user_id' in d:
o.user_id = d['user_id']
return o
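# Round-trip sketch for the model above (field names come from the class
# itself; the concrete values are invented for illustration):
#
#     m = AlipayMarketingCampaignTaskQueryModel()
#     m.user_id = "2088123456789012"
#     m.task_ids = ["task-1", "task-2"]
#     d = m.to_alipay_dict()
#     assert d == {"task_ids": ["task-1", "task-2"],
#                  "user_id": "2088123456789012"}
#     m2 = AlipayMarketingCampaignTaskQueryModel.from_alipay_dict(d)
#     assert m2.task_ids == ["task-1", "task-2"]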
|
86b3ef5d5067140b0a6f4da7ce6f3db226957f8a
|
5d31f28f56b125f7e22d538a735587cfe779cd2d
|
/tests/handlers/test_base_handler_normalize_images.py
|
2a82b712fffdfd26427ec175ec9f2f368e396e94
|
[
"MIT"
] |
permissive
|
thumbor/thumbor
|
c3cb0fbdbf66492d321d4d64e9bc3cb59bfbc1fa
|
3e35fc024e895313d4b8c1da29286b96859ae122
|
refs/heads/master
| 2023-07-19T20:45:22.507924
| 2023-01-27T16:34:15
| 2023-07-14T19:02:46
| 1,488,139
| 7,641
| 839
|
MIT
| 2023-09-05T21:36:16
| 2011-03-16T17:30:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
test_base_handler_normalize_images.py
|
from unittest.mock import patch
from preggy import expect
from tornado.testing import gen_test
from tests.fixtures.images import default_image
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.engines.pil import Engine
from thumbor.importer import Importer
from thumbor.loaders import LoaderResult
class BaseHandlerLoadingNormalizeImagesTestCase(BaseImagingTestCase):
def setUp(self):
super().setUp()
self.storage_patcher = patch(
"thumbor.storages.file_storage.Storage.get"
)
self.storage_mock = self.storage_patcher.start()
self.loader_patcher = patch("thumbor.loaders.http_loader.load")
self.loader_mock = self.loader_patcher.start()
def tearDown(self):
super().tearDown()
self.storage_patcher.stop()
self.loader_patcher.stop()
def get_context(self):
cfg = Config(SECURITY_KEY="ACME-SEC")
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
ctx = Context(server, cfg, importer)
return ctx
@gen_test
async def test_can_normalize_image_loaded_from_storage(self):
self.storage_mock.return_value = default_image()
self.context.config.MAX_HEIGHT = 1
response = await self.async_fetch("/unsafe/smart/image.jpg")
expect(response.code).to_equal(200)
engine = Engine(self.context)
engine.load(response.body, None)
expect(engine.size).to_equal((1, 1))
@gen_test
async def test_can_normalize_image_loaded_from_upstream(self):
self.storage_mock.return_value = None
self.loader_mock.return_value = LoaderResult(buffer=default_image())
self.context.config.MAX_HEIGHT = 1
response = await self.async_fetch("/unsafe/smart/image.jpg")
expect(response.code).to_equal(200)
engine = Engine(self.context)
engine.load(response.body, None)
expect(engine.size).to_equal((1, 1))
|
1388ee21aa0c904812b99e6be7e6cba7c786ffd3
|
cfb638fee5fa2cdd3149a8ea91043e6bc0808275
|
/tulip/graphics.py
|
ae7d4d8751f4a909435fa908d413406d0f21af94
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] |
permissive
|
tulip-control/tulip-control
|
a23436a122dc317d39b0980c40f2da5740433ae5
|
83f993c2ae06aa8368e4bbba02bf52d68725e106
|
refs/heads/main
| 2023-08-22T14:39:02.797004
| 2022-06-23T19:40:03
| 2022-06-23T19:40:03
| 13,993,728
| 107
| 37
|
BSD-3-Clause
| 2023-09-06T17:29:38
| 2013-10-30T17:15:55
|
Python
|
UTF-8
|
Python
| false
| false
| 8,245
|
py
|
graphics.py
|
# Copyright (c) 2013-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Convenience functions for plotting
WARNING: The public functions dimension, newax, dom2vec, quiver will
eventually be removed. Their use in new applications is discouraged.
They come from https://github.com/johnyf/pyvectorized
"""
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
from warnings import warn
try:
from itertools import zip_longest as izip_longest
except ImportError:
from itertools import izip_longest
import numpy as np
# inline
# from matplotlib import pyplot as plt
# from mpl_toolkits.mplot3d import axes3d
# from mayavi.mlab import quiver3d
# import graphviz as _gv
__all__ = ['dimension', 'newax', 'dom2vec', 'quiver']
def dimension(ndarray):
"""dimension of ndarray (DEPRECATED)
- ndim == 1:
dimension = 1
- ndim == 2:
dimension = shape[0]
"""
if ndarray.ndim < 2:
return ndarray.ndim
return ndarray.shape[0]
def newax(subplots=(1, 1), fig=None,
mode='list', dim=2):
"""Create (possibly multiple) new axes handles. (DEPRECATED)
@param fig: attach axes to this figure
@type fig: figure object,
should be consistent with C{dim}
@param subplots: number or layout of subplots
@type subplots: int or
2-tuple of subplot layout
@param mode: return the axes shaped as a
vector or as a matrix.
This is a convenience for later iterations
over the axes.
@type mode: 'matrix' | ['list']
@param dim: plot dimension:
- if dim == 2, then use matplotlib
- if dim == 3, then use mayavi
So the figure type depends on dim.
@return: C{(ax, fig)} where:
- C{ax}: axes created
- C{fig}: parent of ax
@rtype: list or list of lists,
depending on C{mode} above
"""
plt = _import_pyplot()
# layout or number of axes ?
try:
subplot_layout = tuple(subplots)
    except TypeError:
subplot_layout = (1, subplots)
# reasonable layout ?
if len(subplot_layout) != 2:
        raise Exception(
            'newax: subplot layout should be a 2-tuple or an int.')
# which figure ?
if fig is None:
fig = plt.figure()
# create subplot(s)
(nv, nh) = subplot_layout
n = np.prod(subplot_layout)
try:
dim = tuple(dim)
    except TypeError:
# all same dim
dim = [dim] * n
# matplotlib (2D) or mayavi (3D) ?
ax = []
for (i, curdim) in enumerate(dim):
if curdim == 2:
curax = fig.add_subplot(nv, nh, i + 1)
ax.append(curax)
else:
curax = fig.add_subplot(nv, nh, i + 1, projection='3d')
ax.append(curax)
if curdim > 3:
warn('ndim > 3, but plot limited to 3.')
if mode == 'matrix':
ax = list(_grouper(nh, ax))
# single axes ?
if subplot_layout == (1, 1):
ax = ax[0]
return (ax, fig)
def dom2vec(domain, resolution):
"""Matrix of column vectors for meshgrid points. (DEPRECATED)
Returns a matrix of column vectors for the meshgrid
point coordinates over a parallelepiped domain
with the given resolution.
Example
=======
    >>> domain = [0, 1, 0, 2]
>>> resolution = [4, 5]
    >>> q = dom2vec(domain, resolution)
@param domain: extremal values of parallelepiped
@type domain: [xmin, xmax, ymin, ymax, ...]
@param resolution: # points /dimension
@type resolution: [nx, ny, ...]
@return: q = matrix of column vectors (meshgrid point coordinates)
@rtype: [#dim x #points]
See also vec2meshgrid, domain2meshgrid, meshgrid2vec.
"""
domain = _grouper(2, domain)
lambda_linspace = lambda dom, res: np.linspace(dom[0], dom[1], res)
axis_grids = map(lambda_linspace, domain, resolution)
pnt_coor = np.meshgrid(*axis_grids)
    # use a list, not a bare map: newer numpy versions reject iterators here
    q = np.vstack([np.ravel(c) for c in pnt_coor])
return q
def quiver(x, v, ax=None, **kwargs):
"""Multi-dimensional quiver. (DEPRECATED)
Plot v columns at points in columns of x
in axes ax with plot formatting options in kwargs.
>>> import numpy as np
    >>> from matplotlib import pyplot as plt
    >>> from pyvectorized import quiver, dom2vec
    >>> x = dom2vec([0, 10, 0, 11], [20, 20])
    >>> v = np.vstack((np.sin(x[0, :]), np.cos(x[1, :])))
    >>> quiver(x, v, plt.gca())
see also
matplotlib.quiver, mayavi.quiver3
    @param x: matrix of points where the vectors are based;
        each column is a coordinate tuple
    @type x: [#dim x #points] 2d lil | numpy.ndarray
    @param v: matrix of column vectors to plot at the points x
    @type v: [#dim x #points] 2d lil | numpy.ndarray
    @param ax: axes handle, e.g., ax = plt.gca()
    @param kwargs: plot formatting
    @return: handle to plotted object(s)
"""
plt = _import_pyplot()
# multiple axes ?
try:
fields = [quiver(x, v, i, **kwargs) for i in ax]
return fields
    except TypeError:
        # ax is a single axes (or None), not an iterable of axes
        pass
if not ax:
ax = plt.gca()
dim = dimension(x)
if dim < 2:
raise Exception('ndim < 2')
elif dim < 3:
h = ax.quiver(x[0, :], x[1, :],
v[0, :], v[1, :], **kwargs)
else:
raise NotImplementedError
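        # NOTE: everything below in this branch is unreachable after the
        # raise above; it is the leftover mayavi 3D path from pyvectorized.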
from mayavi.mlab import quiver3d
if ax:
print('axes arg ignored, mayavi used')
h = quiver3d(x[0, :], x[1, :], x[2, :],
v[0, :], v[1, :], v[2, :], **kwargs)
if dim > 3:
        warn('quiver: ndim > 3, '
             'plotting only the 3D component.')
return h
def _grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
"""
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def _import_pyplot():
"""Try to import `matplotlib.pyplot`, or raise `ImportError`."""
try:
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
except Exception as e:
raise ImportError('Failed to import `matplotlib.pyplot`')
return plt
def networkx_to_graphviz(graph):
"""Convert `networkx` `graph` to `graphviz.Digraph`."""
import graphviz as _gv
if graph.is_directed():
gv_graph = _gv.Digraph()
else:
gv_graph = _gv.Graph()
for u, d in graph.nodes(data=True):
gv_graph.node(
str(u), **d)
for u, v, d in graph.edges(data=True):
gv_graph.edge(
str(u), str(v), **d)
return gv_graph
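# Usage sketch for the deprecated helpers above (signatures taken from this
# module; the vector field itself is illustrative):
#
#     import numpy as np
#     from tulip.graphics import newax, dom2vec, quiver
#     ax, fig = newax()                      # one 2D matplotlib axes
#     x = dom2vec([0, 10, 0, 10], [20, 20])  # 2 x 400 grid-point coordinates
#     v = np.vstack((np.sin(x[0, :]), np.cos(x[1, :])))
#     quiver(x, v, ax)                       # plot the field on that axes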
|
55c4cc73fe566ec45ee641d932140109f6923d8c
|
6958f617af0c5a76304ceb1006c77bc70ca0e195
|
/tests/python/test_ast_refactor.py
|
deab2b0310c444583ee577277492eb2691019fc8
|
[
"Apache-2.0"
] |
permissive
|
taichi-dev/taichi
|
3fae315a494f1c97392d5b931c939abbbfba1bdc
|
b30b511f55e3d0ebff765ee048d0aaa4ba9e7667
|
refs/heads/master
| 2023-09-02T13:28:18.208792
| 2023-08-23T23:22:43
| 2023-08-23T23:22:43
| 74,660,642
| 17,231
| 1,841
|
Apache-2.0
| 2023-09-14T11:29:32
| 2016-11-24T10:00:05
|
C++
|
UTF-8
|
Python
| false
| false
| 22,171
|
py
|
test_ast_refactor.py
|
import sys
import numpy as np
import pytest
from taichi.lang import impl
from taichi.lang.util import has_pytorch
import taichi as ti
from tests import test_utils
if sys.version_info >= (3, 8):
# Import the test case only if the Python version is >= 3.8
from .py38_only import test_namedexpr # noqa
@test_utils.test()
def test_binop():
@ti.kernel
def foo(x: ti.i32, y: ti.i32, a: ti.template()):
a[0] = x + y
a[1] = x - y
a[2] = x * y
a[3] = impl.ti_float(x) / y
a[4] = x // y
a[5] = x % y
a[6] = x**y
a[7] = x << y
a[8] = x >> y
a[9] = x | y
a[10] = x ^ y
a[11] = x & y
x = 37
y = 3
a = ti.field(ti.f32, shape=(12,))
b = ti.field(ti.f32, shape=(12,))
a[0] = x + y
a[1] = x - y
a[2] = x * y
a[3] = x / y
a[4] = x // y
a[5] = x % y
a[6] = x**y
a[7] = x << y
a[8] = x >> y
a[9] = x | y
a[10] = x ^ y
a[11] = x & y
foo(x, y, b)
for i in range(12):
assert a[i] == test_utils.approx(b[i])
@test_utils.test()
def test_augassign():
@ti.kernel
def foo(x: ti.i32, y: ti.i32, a: ti.template(), b: ti.template()):
for i in a:
a[i] = x
a[0] += y
a[1] -= y
a[2] *= y
a[3] //= y
a[4] %= y
a[5] **= y
a[6] <<= y
a[7] >>= y
a[8] |= y
a[9] ^= y
a[10] &= y
b[0] = x
b[0] /= y
x = 37
y = 3
a = ti.field(ti.i32, shape=(11,))
b = ti.field(ti.i32, shape=(11,))
c = ti.field(ti.f32, shape=(1,))
d = ti.field(ti.f32, shape=(1,))
a[0] = x + y
a[1] = x - y
a[2] = x * y
a[3] = x // y
a[4] = x % y
a[5] = x**y
a[6] = x << y
a[7] = x >> y
a[8] = x | y
a[9] = x ^ y
a[10] = x & y
c[0] = x / y
foo(x, y, b, d)
for i in range(11):
assert a[i] == b[i]
assert c[0] == test_utils.approx(d[0])
@test_utils.test()
def test_unaryop():
@ti.kernel
def foo(x: ti.i32, a: ti.template()):
a[0] = +x
a[1] = -x
a[2] = not x
a[3] = ~x
x = 1234
a = ti.field(ti.i32, shape=(4,))
b = ti.field(ti.i32, shape=(4,))
a[0] = +x
a[1] = -x
a[2] = not x
a[3] = ~x
foo(x, b)
for i in range(4):
assert a[i] == b[i]
@test_utils.test()
def test_boolop():
@ti.kernel
def foo(a: ti.template()):
a[0] = 0 and 0
a[1] = 0 and 1
a[2] = 1 and 0
a[3] = 1 and 1
a[4] = 0 or 0
a[5] = 0 or 1
a[6] = 1 or 0
a[7] = 1 or 1
a[8] = 1 and 1 and 1 and 1
a[9] = 1 and 1 and 1 and 0
a[10] = 0 or 0 or 0 or 0
a[11] = 0 or 0 or 1 or 0
a = ti.field(ti.i32, shape=(12,))
b = ti.field(ti.i32, shape=(12,))
a[0] = 0 and 0
a[1] = 0 and 1
a[2] = 1 and 0
a[3] = 1 and 1
a[4] = 0 or 0
a[5] = 0 or 1
a[6] = 1 or 0
a[7] = 1 or 1
a[8] = 1 and 1 and 1 and 1
a[9] = 1 and 1 and 1 and 0
a[10] = 0 or 0 or 0 or 0
a[11] = 0 or 0 or 1 or 0
foo(b)
for i in range(12):
assert a[i] == b[i]
@test_utils.test()
def test_single_compare():
@ti.kernel
def foo(a: ti.template(), b: ti.template(), c: ti.template()):
for i in ti.static(range(3)):
c[i * 6] = a[i] == b[i]
c[i * 6 + 1] = a[i] != b[i]
c[i * 6 + 2] = a[i] < b[i]
c[i * 6 + 3] = a[i] <= b[i]
c[i * 6 + 4] = a[i] > b[i]
c[i * 6 + 5] = a[i] >= b[i]
a = ti.Vector([1, 1, 2])
b = ti.Vector([2, 1, 1])
c = ti.field(ti.i32, shape=(18,))
d = ti.field(ti.i32, shape=(18,))
for i in range(3):
c[i * 6] = a[i] == b[i]
c[i * 6 + 1] = a[i] != b[i]
c[i * 6 + 2] = a[i] < b[i]
c[i * 6 + 3] = a[i] <= b[i]
c[i * 6 + 4] = a[i] > b[i]
c[i * 6 + 5] = a[i] >= b[i]
foo(a, b, d)
for i in range(18):
assert c[i] == d[i]
@test_utils.test()
def test_chain_compare():
@ti.kernel
def foo(a: ti.i32, b: ti.i32, c: ti.template()):
c[0] = a == b == a
c[1] = a == b != a
c[2] = a != b == a
c[3] = a < b > a
c[4] = a > b < a
c[5] = a < b < a
c[6] = a > b > a
c[7] = a == a == a == a
c[8] = a == a == a != a
c[9] = a < b > a < b
c[10] = a > b > a < b
a = 1
b = 2
c = ti.field(ti.i32, shape=(11,))
d = ti.field(ti.i32, shape=(11,))
c[0] = a == b == a
c[1] = a == b != a
c[2] = a != b == a
c[3] = a < b > a
c[4] = a > b < a
c[5] = a < b < a
c[6] = a > b > a
c[7] = a == a == a == a
c[8] = a == a == a != a
c[9] = a < b > a < b
c[10] = a > b > a < b
foo(a, b, d)
for i in range(11):
assert c[i] == d[i]
@test_utils.test()
def test_return():
@ti.kernel
def foo(x: ti.i32) -> ti.i32:
return x + 1
assert foo(1) == 2
@test_utils.test()
def test_format_print():
a = ti.field(ti.i32, shape=(10,))
@ti.kernel
def foo():
a[0] = 1.0
a[5] = 2.0
print("Test if the string.format and fstring print works")
print("string.format: a[0]={}, a[5]={}".format(a[0], a[5]))
print(f"fstring: a[0]={a[0]}, a[5]={a[5]}")
@test_utils.test(print_preprocessed_ir=True)
def test_if():
@ti.kernel
def foo(x: ti.i32) -> ti.i32:
ret = 0
if x:
ret = 1
else:
ret = 0
return ret
assert foo(1)
assert not foo(0)
@test_utils.test(print_preprocessed_ir=True)
def test_static_if():
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
ret = 0
if ti.static(x):
ret = 1
else:
ret = 0
return ret
assert foo(1)
assert not foo(0)
@test_utils.test(print_preprocessed_ir=True)
def test_struct_for():
a = ti.field(ti.i32, shape=(10,))
@ti.kernel
def foo(x: ti.i32):
for i in a:
a[i] = x
x = 5
foo(x)
for i in range(10):
assert a[i] == 5
@test_utils.test(print_preprocessed_ir=True)
def test_grouped_struct_for():
a = ti.field(ti.i32, shape=(4, 4))
@ti.kernel
def foo(x: ti.i32):
for I in ti.grouped(a):
a[I] = x
x = 5
foo(x)
for i in range(4):
for j in range(4):
assert a[i, j] == 5
@test_utils.test(print_preprocessed_ir=True)
def test_static_for():
a = ti.field(ti.i32, shape=(10,))
@ti.kernel
def foo(x: ti.i32):
for i in ti.static(range(10)):
a[i] = x
x = 5
foo(x)
for i in range(10):
assert a[i] == 5
@test_utils.test(print_preprocessed_ir=True)
def test_static_grouped_for():
a = ti.field(ti.i32, shape=(4, 4))
@ti.kernel
def foo(x: ti.i32):
for i in ti.static(ti.grouped(ti.ndrange((1, 3), (1, 3)))):
a[i] = x
x = 5
foo(x)
for i in range(4):
for j in range(4):
if 1 <= i < 3 and 1 <= j < 3:
assert a[i, j] == 5
else:
assert a[i, j] == 0
@test_utils.test(print_preprocessed_ir=True)
def test_range_for_single_argument():
a = ti.field(ti.i32, shape=(10,))
@ti.kernel
def foo(x: ti.i32):
for i in range(5):
a[i] = x
x = 5
foo(x)
for i in range(10):
if i < 5:
assert a[i] == 5
else:
assert a[i] == 0
@test_utils.test(print_preprocessed_ir=True)
def test_range_for_two_arguments():
a = ti.field(ti.i32, shape=(10,))
@ti.kernel
def foo(x: ti.i32):
for i in range(3, 7):
a[i] = x
x = 5
foo(x)
for i in range(10):
if 3 <= i < 7:
assert a[i] == 5
else:
assert a[i] == 0
@test_utils.test()
def test_range_for_three_arguments():
a = ti.field(ti.i32, shape=(10,))
with pytest.raises(ti.TaichiCompilationError, match="Range should have 1 or 2 arguments, found 3"):
@ti.kernel
def foo(x: ti.i32):
for i in range(3, 7, 2):
a[i] = x
x = 5
foo(x)
@test_utils.test(print_preprocessed_ir=True)
def test_ndrange_for():
x = ti.field(ti.f32, shape=(16, 32, 64))
@ti.kernel
def func():
for i, j, k in ti.ndrange((4, 10), (3, 8), 17):
x[i, j, k] = i + j * 10 + k * 100
func()
for i in range(16):
for j in range(32):
for k in range(64):
if 4 <= i < 10 and 3 <= j < 8 and k < 17:
assert x[i, j, k] == i + j * 10 + k * 100
else:
assert x[i, j, k] == 0
@test_utils.test(print_preprocessed_ir=True)
def test_grouped_ndrange_for():
x = ti.field(ti.i32, shape=(6, 6, 6))
y = ti.field(ti.i32, shape=(6, 6, 6))
@ti.kernel
def func():
lower = ti.Vector([0, 1, 2])
upper = ti.Vector([3, 4, 5])
for I in ti.grouped(ti.ndrange((lower[0], upper[0]), (lower[1], upper[1]), (lower[2], upper[2]))):
x[I] = I[0] + I[1] + I[2]
for i in range(0, 3):
for j in range(1, 4):
for k in range(2, 5):
y[i, j, k] = i + j + k
func()
for i in range(6):
for j in range(6):
for k in range(6):
assert x[i, j, k] == y[i, j, k]
@test_utils.test(print_preprocessed_ir=True)
def test_static_for_break():
n = 10
@ti.kernel
def foo(a: ti.template()):
for i in ti.static(range(n)):
a[i] = 3
if ti.static(i >= 5):
break
a[i] = 10
a[i] = 5
a = ti.field(ti.i32, shape=(n,))
foo(a)
for i in range(n):
if i < 5:
assert a[i] == 5
elif i == 5:
assert a[i] == 3
else:
assert a[i] == 0
@test_utils.test(print_preprocessed_ir=True)
def test_static_grouped_for_break():
n = 4
@ti.kernel
def foo(a: ti.template()):
for I in ti.static(ti.grouped(ti.ndrange(n, n))):
a[I] = 3
if ti.static(I[0] >= 3):
break
a[I] = 10
a[I] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
elif i == 3 and j == 0:
assert a[i, j] == 3
else:
assert a[i, j] == 0
@test_utils.test(print_preprocessed_ir=True)
def test_static_for_continue():
n = 10
@ti.kernel
def foo(a: ti.template()):
for i in ti.static(range(n)):
a[i] = 3
if ti.static(i >= 5):
continue
a[i] = 10
a[i] = 5
a = ti.field(ti.i32, shape=(n,))
foo(a)
for i in range(n):
if i < 5:
assert a[i] == 5
else:
assert a[i] == 3
@test_utils.test(print_preprocessed_ir=True)
def test_static_grouped_for_continue():
n = 4
@ti.kernel
def foo(a: ti.template()):
for I in ti.static(ti.grouped(ti.ndrange(n, n))):
a[I] = 3
if ti.static(I[0] >= 3):
continue
a[I] = 10
a[I] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
else:
assert a[i, j] == 3
@test_utils.test(print_preprocessed_ir=True)
def test_for_break():
n = 4
@ti.kernel
def foo(a: ti.template()):
for i in range(n):
for j in range(n):
a[i, j] = 3
if i >= 3:
break
a[i, j] = 10
a[i, j] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
elif i == 3 and j == 0:
assert a[i, j] == 3
else:
assert a[i, j] == 0
@test_utils.test(print_preprocessed_ir=True)
def test_for_continue():
n = 4
@ti.kernel
def foo(a: ti.template()):
for i in range(n):
for j in range(n):
a[i, j] = 3
if i >= 3:
continue
a[i, j] = 10
a[i, j] = 5
a = ti.field(ti.i32, shape=(n, n))
foo(a)
for i in range(n):
for j in range(n):
if i < 3:
assert a[i, j] == 5
else:
assert a[i, j] == 3
@test_utils.test()
def test_while():
x = ti.field(ti.f32)
N = 1
ti.root.dense(ti.i, N).place(x)
@ti.kernel
def func():
i = 0
s = 0
while i < 10:
s += i
i += 1
x[0] = s
func()
assert x[0] == 45
@test_utils.test()
def test_while_break():
ret = ti.field(ti.i32, shape=())
@ti.kernel
def func():
i = 0
s = 0
while True:
s += i
i += 1
if i > 10:
break
ret[None] = s
func()
assert ret[None] == 55
@test_utils.test()
def test_while_continue():
ret = ti.field(ti.i32, shape=())
@ti.kernel
def func():
i = 0
s = 0
while i < 10:
i += 1
if i % 2 == 0:
continue
s += i
ret[None] = s
func()
assert ret[None] == 25
@test_utils.test(print_preprocessed_ir=True)
def test_func():
@ti.func
def bar(x):
return x * x, -x
a = ti.field(ti.i32, shape=(10,))
b = ti.field(ti.i32, shape=(10,))
@ti.kernel
def foo():
for i in a:
a[i], b[i] = bar(i)
foo()
for i in range(10):
assert a[i] == i * i
assert b[i] == -i
@test_utils.test(print_preprocessed_ir=True)
def test_func_in_python_func():
@ti.func
def bar(x: ti.template()):
if ti.static(x):
mat = bar(x // 2)
mat = mat @ mat
if ti.static(x % 2):
mat = mat @ ti.Matrix([[1, 1], [1, 0]])
return mat
else:
return ti.Matrix([[1, 0], [0, 1]])
def fibonacci(x):
ast_builder = impl.get_runtime().compiling_callable.ast_builder()
return impl.subscript(ast_builder, bar(x), 1, 0)
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
return fibonacci(x)
fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
for i in range(10):
assert foo(i) == fib[i]
@test_utils.test(print_preprocessed_ir=True)
def test_ifexp():
@ti.kernel
def foo(x: ti.i32) -> ti.i32:
return 1 if x else 0
assert foo(1) == 1
assert foo(0) == 0
@test_utils.test(print_preprocessed_ir=True)
def test_static_ifexp():
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
return 1 if ti.static(x) else 0
assert foo(1) == 1
assert foo(0) == 0
@test_utils.test()
def test_static_assign():
a = ti.field(ti.i32, shape=(1,))
b = ti.field(ti.i32, shape=(1,))
@ti.kernel
def foo(xx: ti.template(), yy: ti.template()) -> ti.i32:
x, y = ti.static(xx, yy)
x[0] -= 1
y[0] -= 1
return x[0] + y[0]
a[0] = 2
b[0] = 3
assert foo(a, b) == 3
@test_utils.test()
def test_static_assign_element():
with pytest.raises(
ti.TaichiCompilationError,
match="Static assign cannot be used on elements in arrays",
):
@ti.kernel
def foo():
a = ti.static([1, 2, 3])
a[0] = ti.static(2)
foo()
@test_utils.test()
def test_recreate_variable():
with pytest.raises(ti.TaichiCompilationError, match="Recreating variables is not allowed"):
@ti.kernel
def foo():
a = 1
a = ti.static(2)
foo()
@test_utils.test()
def test_taichi_other_than_ti():
import taichi as tc
@tc.func
def bar(x: tc.template()):
if tc.static(x):
mat = bar(x // 2)
mat = mat @ mat
if tc.static(x % 2):
mat = mat @ tc.Matrix([[1, 1], [1, 0]])
return mat
else:
return tc.Matrix([[1, 0], [0, 1]])
def fibonacci(x):
ast_builder = impl.get_runtime().compiling_callable.ast_builder()
return impl.subscript(ast_builder, bar(x), 1, 0)
@tc.kernel
def foo(x: tc.template()) -> tc.i32:
return fibonacci(x)
fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
for i in range(10):
assert foo(i) == fib[i]
@test_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_assert_message():
@ti.kernel
def func():
x = 20
assert 10 <= x < 20, "Foo bar"
with pytest.raises(RuntimeError, match="Foo bar"):
func()
@test_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_assert_message_formatted():
x = ti.field(dtype=int, shape=16)
x[10] = 42
@ti.kernel
def assert_formatted():
for i in x:
assert x[i] == 0, "x[%d] expect=%d got=%d" % (i, 0, x[i])
@ti.kernel
def assert_float():
y = 0.5
assert y < 0, "y = %f" % y
with pytest.raises(RuntimeError, match=r"x\[10\] expect=0 got=42"):
assert_formatted()
    # TODO: note that we are not yet able to recover cleanly from
    # assertion failures...
with pytest.raises(RuntimeError, match=r"y = 0.5"):
assert_float()
# success case
x[10] = 0
assert_formatted()
@test_utils.test()
def test_dict():
@ti.kernel
def foo(x: ti.template()) -> ti.i32:
a = {1: 2, 3: 4}
b = {5: 6, **a}
return b[x]
assert foo(1) == 2
with pytest.raises(ti.TaichiCompilationError):
foo(2)
@test_utils.test()
def test_single_listcomp():
@ti.func
def identity(dt, n: ti.template()):
return ti.Matrix([[ti.cast(int(i == j), dt) for j in range(n)] for i in range(n)])
@ti.kernel
def foo(n: ti.template()) -> ti.i32:
a = identity(ti.i32, n)
b = [i[0] for i in a]
ret = 0
for i in ti.static(range(n)):
ret += b[i]
return ret
assert foo(5) == 1
@test_utils.test()
def test_listcomp():
@ti.func
def identity(dt, n: ti.template()):
return ti.Matrix([[ti.cast(int(i == j), dt) for j in range(n)] for i in range(n)])
@ti.kernel
def foo(n: ti.template()) -> ti.i32:
a = identity(ti.i32, n)
b = [j for i in a for j in i]
ret = 0
for i in ti.static(range(n)):
for j in ti.static(range(n)):
ret += i * j * b[i * n + j]
return ret
assert foo(5) == 1 + 4 + 9 + 16
@test_utils.test()
def test_dictcomp():
@ti.kernel
def foo(n: ti.template()) -> ti.i32:
a = {i: i * i for i in range(n) if i % 3 if i % 2}
ret = 0
for i in ti.static(range(n)):
if ti.static(i % 3):
if ti.static(i % 2):
ret += a[i]
return ret
assert foo(10) == 1 * 1 + 5 * 5 + 7 * 7
@test_utils.test()
def test_dictcomp_fail():
@ti.kernel
def foo(n: ti.template(), m: ti.template()) -> ti.i32:
a = {i: i * i for i in range(n) if i % 3 if i % 2}
return a[m]
with pytest.raises(ti.TaichiCompilationError):
foo(5, 2)
with pytest.raises(ti.TaichiCompilationError):
foo(5, 3)
@pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
@test_utils.test(arch=[ti.cpu, ti.cuda, ti.opengl])
def test_ndarray():
n = 4
m = 7
@ti.kernel
def run(x: ti.types.ndarray(dtype=ti.types.matrix(1, 1, ti.i32)), y: ti.types.ndarray()):
for i in ti.static(range(n)):
for j in ti.static(range(m)):
x[i, j][0, 0] += i + j + y[i, j]
a = ti.Matrix.ndarray(1, 1, ti.i32, shape=(n, m))
for i in range(n):
for j in range(m):
a[i, j][0, 0] = i * j
b = np.ones((n, m), dtype=np.int32)
run(a, b)
for i in range(n):
for j in range(m):
assert a[i, j][0, 0] == i * j + i + j + 1
@test_utils.test(arch=ti.cpu)
def test_sparse_matrix_builder():
n = 8
Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100)
@ti.kernel
def fill(Abuilder: ti.types.sparse_matrix_builder()):
for i, j in ti.static(ti.ndrange(n, n)):
Abuilder[i, j] += i + j
fill(Abuilder)
A = Abuilder.build()
for i in range(n):
for j in range(n):
assert A[i, j] == i + j
@test_utils.test()
def test_func_default_value():
@ti.func
def bar(s, t=1):
return s + t
@ti.kernel
def foo() -> ti.i32:
return bar(1)
assert foo() == 2
@test_utils.test()
def test_func_default_value_fail():
with pytest.raises(ti.TaichiCompilationError):
@ti.func
def bar(s, t=1):
return s + t
@ti.kernel
def foo() -> ti.i32:
return bar(1, 2, 3)
foo()
@test_utils.test()
def test_raise():
dim = 1
m = ti.Matrix.field(dim, dim, ti.f32)
ti.root.place(m)
with pytest.raises(
ti.TaichiCompilationError,
match="Polar decomposition only supports 2D and 3D matrices.",
):
@ti.kernel
def foo():
ti.polar_decompose(m, ti.f32)
foo()
@test_utils.test()
def test_default_template_args_on_func():
@ti.func
def bar(a: ti.template() = 123):
return a
@ti.kernel
def foo() -> ti.i32:
return bar()
assert foo() == 123
@test_utils.test()
def test_grouped_static_for_cast():
@ti.kernel
def foo() -> ti.f32:
ret = 0.0
for I in ti.static(ti.grouped(ti.ndrange((4, 5), (3, 5), 5))):
tmp = I.cast(float)
ret += tmp[2] / 2
return ret
assert foo() == test_utils.approx(10)
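# For orientation, a minimal kernel in the same style as the tests above
# (a sketch; a CPU backend is assumed, as in test_sparse_matrix_builder):
@test_utils.test(arch=ti.cpu)
def test_minimal_kernel_sketch():
    a = ti.field(ti.i32, shape=(4,))
    @ti.kernel
    def fill(x: ti.i32):
        for i in a:  # struct-for over the field, as in test_struct_for
            a[i] = x + i
    fill(10)
    for i in range(4):
        assert a[i] == 10 + i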
|