Dataset column schema (name, dtype, observed value range):

| column | dtype | observed values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |

blob_id: 8a959044feee4fbbc321fb06cedb019e5a3702b4 | directory_id: ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
path: /python_modules/dagster-webserver/dagster_webserver_tests/webserver/conftest.py | content_id: 745ba0c7db013230eeaa7976d24ff853a2067d7b
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: dagster-io/dagster
snapshot_id: 6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a | revision_id: fe21995e0402878437a828c6a4244025eac8c43b | branch_name: refs/heads/master
visit_date: 2023-09-05T20:46:08.203794 | revision_date: 2023-09-05T19:54:52 | committer_date: 2023-09-05T19:54:52
github_id: 131,619,646 | star_events_count: 8,565 | fork_events_count: 1,154
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T21:57:37 | gha_created_at: 2018-04-30T16:30:04 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 698 | extension: py | filename: conftest.py
content:
import pytest
from dagster import DagsterInstance, __version__
from dagster._cli.workspace.cli_target import get_workspace_process_context_from_kwargs
from dagster_webserver.webserver import DagsterWebserver
from starlette.testclient import TestClient


@pytest.fixture(scope="session")
def instance():
    return DagsterInstance.local_temp()


@pytest.fixture(scope="session")
def test_client(instance):
    process_context = get_workspace_process_context_from_kwargs(
        instance=instance,
        version=__version__,
        read_only=False,
        kwargs={"empty_workspace": True},
    )
    app = DagsterWebserver(process_context).create_asgi_app(debug=True)
    return TestClient(app)
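
# A minimal sketch (not part of the original conftest) of how the session-scoped
# fixtures above might be consumed in a test; the requested path and the status
# expectation are assumptions, not documented dagster-webserver behaviour.
def test_webserver_serves_requests(test_client):
    response = test_client.get("/")  # TestClient issues requests against the ASGI app in-process
    assert response.status_code < 500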

blob_id: 197cd815ff181912ce83f6b8a2e11ee5757b2349 | directory_id: 87d9e6c0606ec9b18d2016fe4e21601c7b37b216
path: /test/django/test_mixins.py | content_id: d9bd775fd4998dc1d3b0df4997969ce249b65de5
detected_licenses: ["Python-2.0", "MIT"] | license_type: permissive | repo_name: Stranger6667/postmarker
snapshot_id: d693f1b4d03f8770c5415048ad896601ba618a04 | revision_id: c5717014b6c837502353a8a5361832cb3bc49d7b | branch_name: refs/heads/master
visit_date: 2023-06-22T01:57:01.797338 | revision_date: 2022-01-15T14:09:43 | committer_date: 2022-01-15T14:09:43
github_id: 69,878,276 | star_events_count: 116 | fork_events_count: 28
gha_license_id: MIT | gha_event_created_at: 2023-06-20T20:17:31 | gha_created_at: 2016-10-03T14:24:07 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,408 | extension: py | filename: test_mixins.py
content:
import pytest
from django.core.mail import EmailMultiAlternatives

from postmarker.django import PostmarkEmailMixin

pytestmark = pytest.mark.usefixtures("outbox")


class TaggedEmail(PostmarkEmailMixin, EmailMultiAlternatives):
    pass


def test_tags(postmark_request):
    TaggedEmail(
        "Subject",
        "Body",
        "sender@example.com",
        ["receiver@example.com"],
        tag="Test tag",
    ).send()
    assert postmark_request.call_args[1]["json"][0] == {
        "ReplyTo": None,
        "Subject": "Subject",
        "To": "receiver@example.com",
        "Bcc": None,
        "Headers": [],
        "Cc": None,
        "Attachments": [],
        "TextBody": "Body",
        "HtmlBody": None,
        "TrackOpens": True,
        "Tag": "Test tag",
        "Metadata": None,
        "MessageStream": None,
        "From": "sender@example.com",
    }


class EmailWithMetadata(PostmarkEmailMixin, EmailMultiAlternatives):
    pass


def test_metadata(postmark_request):
    EmailWithMetadata(
        "Subject",
        "Body",
        "sender@example.com",
        ["receiver@example.com"],
        metadata={"key1": "value1", "key2": "value2"},
    ).send()
    assert postmark_request.call_args[1]["json"][0] == {
        "ReplyTo": None,
        "Subject": "Subject",
        "To": "receiver@example.com",
        "Bcc": None,
        "Headers": [],
        "Cc": None,
        "Attachments": [],
        "TextBody": "Body",
        "HtmlBody": None,
        "TrackOpens": True,
        "Tag": None,
        "Metadata": {"key1": "value1", "key2": "value2"},
        "MessageStream": None,
        "From": "sender@example.com",
    }


class EmailWithMessageStream(PostmarkEmailMixin, EmailMultiAlternatives):
    pass


def test_message_stream(postmark_request):
    EmailWithMessageStream(
        "Subject", "Body", "sender@example.com", ["receiver@example.com"], message_stream="example-message-stream"
    ).send()
    assert postmark_request.call_args[1]["json"][0] == {
        "ReplyTo": None,
        "Subject": "Subject",
        "To": "receiver@example.com",
        "Bcc": None,
        "Headers": [],
        "Cc": None,
        "Attachments": [],
        "TextBody": "Body",
        "HtmlBody": None,
        "TrackOpens": True,
        "Tag": None,
        "Metadata": None,
        "MessageStream": "example-message-stream",
        "From": "sender@example.com",
    }
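
# A configuration sketch (not part of the original test module): for PostmarkEmailMixin
# subclasses like those above to send through Postmark in a real Django project, the
# project settings point the email backend at postmarker and supply a server token.
# The setting names below are an assumption based on postmarker's documented Django
# integration and should be verified against its documentation.
EMAIL_BACKEND = "postmarker.django.EmailBackend"
POSTMARK = {
    "TOKEN": "<postmark server token>",  # placeholder value
    "TEST_MODE": False,
}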

blob_id: d402f55fa9b8b1b4d59292220a2a5aa147c52aef | directory_id: 057a475216e9beed41983481aafcaf109bbf58da
path: /tests/integration/test_move_partition_to_disk_on_cluster/test.py | content_id: c639e080cdf186b96d55c05fbb77ff5481e7718c
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: ClickHouse/ClickHouse
snapshot_id: fece5204263a5b4d693854b6039699265f1bb27f | revision_id: 6649328db809d51a694c358571539bc5820464be | branch_name: refs/heads/master
visit_date: 2023-08-31T18:48:36.615225 | revision_date: 2023-08-31T17:51:24 | committer_date: 2023-08-31T17:51:24
github_id: 60,246,359 | star_events_count: 23,878 | fork_events_count: 5,449
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T20:10:52 | gha_created_at: 2016-06-02T08:28:18 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,666 | extension: py | filename: test.py
content:
import pytest

from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance(
    "node1",
    main_configs=[
        "configs/config.d/storage_configuration.xml",
        "configs/config.d/cluster.xml",
    ],
    with_zookeeper=True,
    stay_alive=True,
    tmpfs=["/jbod1:size=10M", "/external:size=10M"],
    macros={"shard": 0, "replica": 1},
)

node2 = cluster.add_instance(
    "node2",
    main_configs=[
        "configs/config.d/storage_configuration.xml",
        "configs/config.d/cluster.xml",
    ],
    with_zookeeper=True,
    stay_alive=True,
    tmpfs=["/jbod1:size=10M", "/external:size=10M"],
    macros={"shard": 0, "replica": 2},
)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_move_partition_to_disk_on_cluster(start_cluster):
    for node in [node1, node2]:
        node.query(
            sql="CREATE TABLE test_local_table"
            "(x UInt64) "
            "ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_local_table', '{replica}') "
            "ORDER BY tuple()"
            "SETTINGS storage_policy = 'jbod_with_external', temporary_directories_lifetime=1;",
        )

    node1.query("INSERT INTO test_local_table VALUES (0)")
    node1.query("SYSTEM SYNC REPLICA test_local_table", timeout=30)

    try:
        node1.query(
            sql="ALTER TABLE test_local_table ON CLUSTER 'test_cluster' MOVE PARTITION tuple() TO DISK 'jbod1';",
        )
    except QueryRuntimeException:
        pass

    for node in [node1, node2]:
        assert (
            node.query(
                "SELECT partition_id, disk_name FROM system.parts WHERE table = 'test_local_table' FORMAT Values"
            )
            == "('all','jbod1')"
        )

    node1.query(
        sql="ALTER TABLE test_local_table ON CLUSTER 'test_cluster' MOVE PARTITION tuple() TO DISK 'external';",
    )

    for node in [node1, node2]:
        assert (
            node.query(
                "SELECT partition_id, disk_name FROM system.parts WHERE table = 'test_local_table' FORMAT Values"
            )
            == "('all','external')"
        )

    node1.query(
        sql="ALTER TABLE test_local_table ON CLUSTER 'test_cluster' MOVE PARTITION tuple() TO VOLUME 'main';",
    )

    for node in [node1, node2]:
        assert (
            node.query(
                "SELECT partition_id, disk_name FROM system.parts WHERE table = 'test_local_table' FORMAT Values"
            )
            == "('all','jbod1')"
        )

blob_id: 559f10396eebe91f908cc1326cbb035ecdfc2759 | directory_id: 2ad93a1cf25a580fe980482d2d17a657de3b2523
path: /django-stubs/db/transaction.pyi | content_id: 868b8b508d442350a6b286810324c738c57f036c
detected_licenses: ["MIT"] | license_type: permissive | repo_name: typeddjango/django-stubs
snapshot_id: f35dfcb001e54694a0a1e8c0afcc6e6a3d130c32 | revision_id: 0117348c3c7713f25f96b46e53ebdeed7bdba544 | branch_name: refs/heads/master
visit_date: 2023-08-25T19:42:52.707151 | revision_date: 2023-08-23T15:13:25 | committer_date: 2023-08-23T15:13:25
github_id: 142,779,680 | star_events_count: 1,133 | fork_events_count: 376
gha_license_id: MIT | gha_event_created_at: 2023-09-13T19:05:06 | gha_created_at: 2018-07-29T17:08:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,178 | extension: pyi | filename: transaction.pyi
content:
from collections.abc import Callable, Iterator
from contextlib import contextmanager
from types import TracebackType
from typing import Any, TypeVar, overload
from django.db import ProgrammingError
class TransactionManagementError(ProgrammingError): ...
def get_connection(using: str | None = ...) -> Any: ...
def get_autocommit(using: str | None = ...) -> bool: ...
def set_autocommit(autocommit: bool, using: str | None = ...) -> Any: ...
def commit(using: str | None = ...) -> None: ...
def rollback(using: str | None = ...) -> None: ...
def savepoint(using: str | None = ...) -> str: ...
def savepoint_rollback(sid: str, using: str | None = ...) -> None: ...
def savepoint_commit(sid: str, using: str | None = ...) -> None: ...
def clean_savepoints(using: str | None = ...) -> None: ...
def get_rollback(using: str | None = ...) -> bool: ...
def set_rollback(rollback: bool, using: str | None = ...) -> None: ...
@contextmanager
def mark_for_rollback_on_error(using: str | None = ...) -> Iterator[None]: ...
def on_commit(func: Callable[[], object], using: str | None = ..., robust: bool = ...) -> None: ...
_C = TypeVar("_C", bound=Callable) # Any callable
# Don't inherit from ContextDecorator, so we can provide a more specific signature for __call__
class Atomic:
    using: str | None
    savepoint: bool
    def __init__(self, using: str | None, savepoint: bool, durable: bool) -> None: ...
    # When decorating, return the decorated function as-is, rather than clobbering it as ContextDecorator does.
    def __call__(self, func: _C) -> _C: ...
    def __enter__(self) -> None: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None: ...
# Bare decorator
@overload
def atomic(using: _C) -> _C: ...
# Decorator or context-manager with parameters
@overload
def atomic(using: str | None = ..., savepoint: bool = ..., durable: bool = ...) -> Atomic: ...
# Bare decorator
@overload
def non_atomic_requests(using: _C) -> _C: ...
# Decorator with arguments
@overload
def non_atomic_requests(using: str | None = ...) -> Callable[[_C], _C]: ...
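
# A short usage sketch (not part of the stub file above) showing the call patterns the
# overloads describe: `atomic` used as a bare decorator, with parameters, and as a
# context manager. The function names are placeholders.
from django.db import transaction

@transaction.atomic                                     # bare-decorator overload: atomic(using: _C) -> _C
def create_order() -> None: ...

@transaction.atomic(using="default", savepoint=True)    # parameterized overload returns Atomic
def create_invoice() -> None: ...

def create_both() -> None:
    with transaction.atomic():                          # Atomic.__enter__/__exit__ make it a context manager
        create_order()
        create_invoice()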

blob_id: 9243817f357992930e1f702f14f8059329334be7 | directory_id: 6ff85b80c6fe1b3ad5416a304b93551a5e80de10
path: /Python/Typing/ConvertIntToByte.py | content_id: 68249f97040e3c981a269f6836a4ddf83a77cdae
detected_licenses: ["MIT"] | license_type: permissive | repo_name: maniero/SOpt
snapshot_id: c600cc2333e0a47ce013be3516bbb8080502ff2a | revision_id: 5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3 | branch_name: refs/heads/master
visit_date: 2023-08-10T16:48:46.058739 | revision_date: 2023-08-10T13:42:17 | committer_date: 2023-08-10T13:42:17
github_id: 78,631,930 | star_events_count: 1,002 | fork_events_count: 136
gha_license_id: MIT | gha_event_created_at: 2023-01-28T12:10:01 | gha_created_at: 2017-01-11T11:19:24 | gha_language: C#
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 132 | extension: py | filename: ConvertIntToByte.py
content:
print((65).to_bytes(1, byteorder='big'))                  # prints b'A'
print(bytes([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]))   # prints b'\n\x14\x1e(2<FPZd'
# https://pt.stackoverflow.com/q/270545/101

blob_id: 01f92720859e4953fdbfc3245ed95821edfd2e33 | directory_id: 516ee648944f3fbab53c62112b5197357b133716
path: /deltapy/__init__.py | content_id: 8eccaa00bf9c6779f632aa61534768c7556cebef
detected_licenses: [] | license_type: no_license | repo_name: firmai/deltapy
snapshot_id: 4a906213bf7249a4c431337adfd2606fcb9ab379 | revision_id: 05c8a6440440bcc5ee1d051d7dfeee70807329de | branch_name: refs/heads/master
visit_date: 2022-06-05T05:30:57.113438 | revision_date: 2022-03-01T16:13:48 | committer_date: 2022-03-01T16:13:48
github_id: 253,993,655 | star_events_count: 529 | fork_events_count: 49
gha_license_id: null | gha_event_created_at: 2020-04-17T17:08:26 | gha_created_at: 2020-04-08T05:27:53 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 113 | extension: py | filename: __init__.py
content:
from deltapy import transform
from deltapy import interact
from deltapy import mapper
from deltapy import extract

blob_id: d332c0e92de23ddc78d9e66cdd38df78c7240a0c | directory_id: a1cb622d58db6941f72c75920a4791cdae5c9984
path: /tests/test_8cache.py | content_id: 5e9be1ad156fab25ab105da6ace692a6b8cdf206
detected_licenses: ["MIT"] | license_type: permissive | repo_name: chanzuckerberg/miniwdl
snapshot_id: b9a5a6fee43bd65b6c2ed168c87cb3b2697ac948 | revision_id: 6052fa836ea0cd91c7a07a6a2766b6cf44c0afe0 | branch_name: refs/heads/main
visit_date: 2023-08-24T03:38:39.712491 | revision_date: 2023-08-13T03:03:49 | committer_date: 2023-08-13T03:03:49
github_id: 144,358,960 | star_events_count: 155 | fork_events_count: 45
gha_license_id: MIT | gha_event_created_at: 2023-08-12T20:20:40 | gha_created_at: 2018-08-11T05:43:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 23,482 | extension: py | filename: test_8cache.py
content:
import glob
import json
import logging
import os
import stat
import random
import shutil
import tempfile
import time
import unittest
import subprocess
from unittest.mock import MagicMock, patch
from .context import WDL
from WDL import values_from_json, values_to_json
from WDL.runtime.cache import CallCache
class TestCallCache(unittest.TestCase):
test_wdl: str = R"""
version 1.0
task hello_blank {
# comment1
input {
String who # comment2
Array[String]? what
Map[String,Map[String,String]]? where
}
command <<<
# comment3
echo "Hello, ~{who}!"
>>>
output {#comment4
Int count = 12
}
}#comment5
"""
ordered_input_dict = {
"what": ["a", "ab", "b", "bc"],
"where": {"places": {"Minneapolis": "a", "SanFan": "b"}},
"who": "Alyssa",
}
doc = WDL.parse_document(test_wdl)
cache_dir = '/tmp/cache/'
struct_task: str = R"""
version 1.0
struct Box {
Array[File] str
}
task hello {
input {
Box box
}
command {
echo "Hello, world!"
}
output {
Int count = 13
}
}
"""
test_wdl_with_output_files: str = R"""
version 1.0
task hello {
String who
File foo = write_lines(["foo","bar","baz"])
File tsv = write_tsv([["one", "two", "three"], ["un", "deux", "trois"]])
File json = write_json({"key1": "value1", "key2": "value2"})
command <<<
echo "Hello, ~{who}!"
>>>
output {
File o_json = json
File a_tsv = tsv
File whynot = write_lines(["foo","bar","baz"])
Int count = 13
String ans = stdout()
}
}
"""
@classmethod
def setUpClass(cls):
logging.basicConfig(level=logging.DEBUG, format='%(name)s %(levelname)s %(message)s')
cls.logger = logging.getLogger(cls.__name__)
cls.cfg = WDL.runtime.config.Loader(cls.logger, [])
cls.cfg.override(
{"call_cache": {
"put": True,
"get": True,
"dir": cls.cache_dir
}
})
def setUp(self):
"""
initialize docker & provision temporary directory for a test (self._dir)
"""
self._dir = tempfile.mkdtemp(prefix=f"miniwdl_test_{self.id()}_")
def tearDown(self):
shutil.rmtree(self._dir)
try:
shutil.rmtree(self.cache_dir)
except FileNotFoundError:
print("No cache directory to delete")
def _run(self, wdl: str, inputs=None, expected_exception: Exception = None, cfg=None):
"""
run workflow/task & return outputs dict
"""
logger = logging.getLogger(self.id())
cfg = cfg or WDL.runtime.config.Loader(logger, [])
try:
with tempfile.NamedTemporaryFile(dir=self._dir, suffix=".wdl", delete=False) as outfile:
outfile.write(wdl.encode("utf-8"))
wdlfn = outfile.name
doc = WDL.load(wdlfn)
target = doc.workflow or doc.tasks[0]
if isinstance(inputs, dict):
inputs = WDL.values_from_json(inputs, target.available_inputs, target.required_inputs)
rundir, outputs = WDL.runtime.run(cfg, target, (inputs or WDL.Env.Bindings()), run_dir=self._dir)
self._rundir = rundir
except Exception as exn:
while isinstance(exn, WDL.runtime.RunFailed):
exn = exn.__context__
if expected_exception:
self.assertIsInstance(exn, expected_exception)
return exn
raise
self.assertIsNone(expected_exception, str(expected_exception) + " not raised")
return rundir, outputs
def test_input_digest_sorts_keys(self):
# Note this fails if input array is reordered
ordered_inputs = values_from_json(
self.ordered_input_dict, self.doc.tasks[0].available_inputs)
unordered_inputs = values_from_json(
{
"where": {"places": {"SanFan": "b", "Minneapolis": "a"}},
"what": ["a", "ab", "b", "bc"],
"who": "Alyssa"
}, self.doc.tasks[0].available_inputs)
ordered_digest = WDL.Value.digest_env(ordered_inputs)
unordered_digest = WDL.Value.digest_env(unordered_inputs)
self.assertEqual(ordered_digest, unordered_digest)
def test_normalization(self):
desc = self.doc.tasks[0]._digest_source()
self.assertEqual(desc, R"""
version 1.0
task hello_blank {
input {
String who
Array[String]? what
Map[String,Map[String,String]]? where
}
command <<<
# comment3
echo "Hello, ~{who}!"
>>>
output {
Int count = 12
}
}
""".strip())
def test_task_input_cache_matches_output(self):
# run task, check output matches what was stored in run_dir
cache = CallCache(cfg=self.cfg, logger=self.logger)
rundir, outputs = self._run(self.test_wdl, self.ordered_input_dict, cfg=self.cfg)
inputs = values_from_json(
self.ordered_input_dict, self.doc.tasks[0].available_inputs)
input_digest = WDL.Value.digest_env(inputs)
task_digest = self.doc.tasks[0].digest
with open(os.path.join(self.cache_dir, f"{self.doc.tasks[0].name}/{task_digest}/{input_digest}.json")) as f:
read_data = json.loads(f.read())
self.assertEqual(read_data, WDL.values_to_json(outputs))
def test_cache_prevents_task_rerun(self):
# run task twice, check _try_task not called for second run
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
# test mock is called
with patch('WDL.runtime.task._try_task', mock):
self._run(self.test_wdl, self.ordered_input_dict, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
# call real _try_task function
self._run(self.test_wdl, self.ordered_input_dict, cfg=self.cfg)
# test mock is not called once cache is available
new_mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', new_mock):
self._run(self.test_wdl, self.ordered_input_dict, cfg=self.cfg)
self.assertEqual(new_mock.call_count, 0)
def test_default_config_does_not_use_cache(self):
# run task twice, check _try_task called for second run
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
# test mock is called
with patch('WDL.runtime.task._try_task', mock):
self._run(self.test_wdl, self.ordered_input_dict)
self.assertEqual(mock.call_count, 1)
# call real _try_task function
self._run(self.test_wdl, self.ordered_input_dict)
# test mock is not called once cache is available
new_mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', new_mock):
self._run(self.test_wdl, self.ordered_input_dict)
self.assertEqual(new_mock.call_count, 1)
def test_get_cache_return_value_matches_outputs(self):
cache = CallCache(cfg=self.cfg, logger=self.logger)
rundir, outputs = self._run(self.test_wdl, self.ordered_input_dict, cfg=self.cfg)
inputs = values_from_json(
self.ordered_input_dict, self.doc.tasks[0].available_inputs)
input_digest = WDL.Value.digest_env(inputs)
task_digest = self.doc.tasks[0].digest
cache_value = cache.get(key=f"{self.doc.tasks[0].name}/{task_digest}/{input_digest}",
output_types=self.doc.tasks[0].effective_outputs,
inputs=inputs)
self.assertEqual(values_to_json(outputs), values_to_json(cache_value))
def test_a_task_with_the_same_inputs_and_different_commands_doesnt_pull_from_the_cache(self):
# run task twice, once with original wdl, once with updated wdl command, check _try_task called for second run
new_test_wdl: str = R"""
version 1.0
task hello_blank {
input {
String who
Array[String]? what
Map[String,Map[String,String]]? where
}
command <<<
echo "Heyyyyy, ~{who}!"
>>>
output {
Int count = 12
}
}
"""
# _try_task function for original wdl
self._run(self.test_wdl, self.ordered_input_dict, cfg=self.cfg)
# test _try_task is called when task def changes (with same inputs)
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
self._run(new_test_wdl, self.ordered_input_dict, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
def test_a_task_with_the_same_inputs_and_different_outputs_doesnt_pull_from_the_cache(self):
# run task twice, once with original wdl, once with updated wdl command, check _try_task called for second run
new_test_wdl: str = R"""
version 1.0
task hello_blank {
input {
String who
Array[String]? what
Map[String,Map[String,String]]? where
}
command <<<
echo "Hello, ~{who}!"
>>>
output {
Int count = 13
}
}
"""
# _try_task function for original wdl
self._run(self.test_wdl, self.ordered_input_dict, cfg=self.cfg)
# test _try_task is called when task def changes (with same inputs)
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
self._run(new_test_wdl, self.ordered_input_dict, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
def test_struct_handling(self):
with open(os.path.join(self._dir, "randomFile.txt"), "w") as outfile:
outfile.write("Gotta put something here")
inputs = {"box": {"str": [os.path.join(self._dir, "randomFile.txt")]}}
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
# test mock is called
with patch('WDL.runtime.task._try_task', mock):
self._run(self.struct_task, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
# run for real
self._run(self.struct_task, inputs, cfg=self.cfg)
new_mock = MagicMock(side_effect=WDL.runtime.task._try_task)
# test mock not called for cached tasks containing a struct
with patch('WDL.runtime.task._try_task', new_mock):
self._run(self.struct_task, inputs, cfg=self.cfg)
self.assertEqual(new_mock.call_count, 0)
def test_cache_not_used_when_output_files_deleted(self):
inputs = {"who": "Alyssa"}
self._run(self.test_wdl_with_output_files, inputs, cfg=self.cfg)
# test mock is not called once cache is available
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
self._run(self.test_wdl_with_output_files, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 0)
# delete files
for x in glob.glob(f"{self._dir}/*_hello/out/a_tsv"):
shutil.rmtree(x)
# test mock is called now that cached file has been deleted
with patch('WDL.runtime.task._try_task', mock):
self._run(self.test_wdl_with_output_files, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
def test_cache_not_used_when_output_files_updated_after_cache_creation(self):
inputs = {"who": "Bethie"}
self._run(self.test_wdl_with_output_files, inputs, cfg=self.cfg)
# change modified time on outputs
time.sleep(0.1)
for x in glob.glob(f"{self._dir}/*_hello/out/a_tsv/*"):
os.utime(x)
# check that mock is called now that output file is older than cache file
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
self._run(self.test_wdl_with_output_files, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
def test_cache_not_used_when_output_files_but_not__sym_links_updated_after_cache_creation(self):
inputs = {"who": "Bethie"}
self._run(self.test_wdl_with_output_files, inputs, cfg=self.cfg)
# change modified time on outputs
time.sleep(0.1)
for x in glob.glob(f"{self._dir}/*_hello/out/a_tsv/*"):
os.utime(x, follow_symlinks=False)
# check that mock is called now that output file is older than cache file
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
self._run(self.test_wdl_with_output_files, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
def test_cache_not_used_when_file_in_array_recently_updated(self):
filenames = ["file1", "file2", "file3", "butterfinger"]
inputs = {"files": []}
for fn in filenames:
fn = os.path.join(self._dir, fn)
with open(fn, "w") as outfile:
print(fn, file=outfile)
inputs["files"].append(fn)
wdl = """
version 1.0
task return_file_array {
input {
Array[File] files
}
command <<<
set -euxo pipefail
mkdir files_out
find _miniwdl_inputs -type f -print0 | xargs -0 -iXXX cp XXX files_out/
>>>
output {
Array[File] files_out = glob("files_out/*")
}
}
"""
self._run(wdl, inputs, cfg=self.cfg)
#check cache used
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
self._run(wdl, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 0)
# change time
time.sleep(0.1)
for x in glob.glob(f"{self._dir}/*_return_file_array/work/files_out/file1"):
os.utime(x)
# check cache not used
with patch('WDL.runtime.task._try_task', mock):
self._run(wdl, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
def test_cache_not_used_when_input_file_recently_updated(self):
filenames = ["file1", "file2", "file3", "butterfinger"]
inputs = {"files": []}
for fn in filenames:
fn = os.path.join(self._dir, fn)
with open(fn, "w") as outfile:
print(fn, file=outfile)
inputs["files"].append(fn)
wdl = """
version 1.0
task return_file_array {
input {
Array[File] files
}
command <<<
echo "Hello"
>>>
output {
Int count = 13
}
}
"""
self._run(wdl, inputs, cfg=self.cfg)
#check cache used
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
self._run(wdl, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 0)
# change time on input file
time.sleep(0.1)
for x in glob.glob(f"{self._dir}/butterfinger"):
os.utime(x)
# check cache not used
with patch('WDL.runtime.task._try_task', mock):
self._run(wdl, inputs, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
def test_directory_coherence(self):
# test outputting files/subdirectories inside input Directory
wdl = R"""
version development
task t {
input {
Directory d
}
command {}
output {
Array[File] files = ["~{d}/alice.txt", "~{d}/sub/bob.txt"]
Array[Directory] dirs = ["~{d}/sub/dir"]
}
}
"""
os.makedirs(os.path.join(self._dir, "d/sub/dir"))
with open(os.path.join(self._dir, "d/alice.txt"), mode="w") as outfile:
print("Alice", file=outfile)
with open(os.path.join(self._dir, "d/sub/bob.txt"), mode="w") as outfile:
print("Bob", file=outfile)
with open(os.path.join(self._dir, "d/sub/dir/carol.txt"), mode="w") as outfile:
print("Carol", file=outfile)
inp = {"d": os.path.join(self._dir, "d")}
outp = self._run(wdl, inp, cfg=self.cfg)
WDL.Value.rewrite_env_files(outp[1], lambda fn: fn) # game coverage of deprecated fn
mock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.task._try_task', mock):
# control
self._run(wdl, inp, cfg=self.cfg)
self.assertEqual(mock.call_count, 0)
# touch a file & check cache invalidated
subprocess.run(["touch", os.path.join(self._dir, "d/sub/dir/carol.txt")], check=True)
self._run(wdl, inp, cfg=self.cfg)
self.assertEqual(mock.call_count, 1)
# add a symlink
time.sleep(0.1)
os.symlink("sub/dir", os.path.join(self._dir, "d/link1"))
self._run(wdl, inp, cfg=self.cfg)
self.assertEqual(mock.call_count, 2)
# delete the symlink
time.sleep(0.1)
os.unlink(os.path.join(self._dir, "d/link1"))
self._run(wdl, inp, cfg=self.cfg)
self.assertEqual(mock.call_count, 3)
# control
self._run(wdl, inp, cfg=self.cfg)
self.assertEqual(mock.call_count, 3)
test_workflow_wdl = R"""
version development
struct Person {
String first
String? middle
String last
}
workflow multihello {
input {
Array[File] people_json
}
# COMMENT
scatter (person_json in people_json) {
call read_person {
input:
json = person_json
}
call hello {
input:
full_name = read_person.full_name
}
call hello as hello2 {
input:
full_name = write_lines([read_string(read_person.full_name)])
}
}
output {
Array[File] messages = flatten([hello.message, hello2.message])
}
}
task read_person {
input {
File json
}
Person person = read_json(json)
command {}
output {
File full_name = write_lines([sep(" ", select_all([person.first, person.middle, person.last]))])
}
}
task hello {
input {
File full_name
String? greeting = "Hello"
}
command <<<
echo '~{greeting}, ~{read_string(full_name)}!'
>>>
output {
File message = stdout()
}
}
task uncalled {
input {
Int i = 0
Person? p
}
command {}
}
"""
def test_workflow_digest(self):
doc = WDL.parse_document(self.test_workflow_wdl)
doc.typecheck()
# ensure digest is sensitive to changes in the struct type and called task (but not the
# uncalled task, or comments/whitespace)
doc2 = WDL.parse_document(self.test_workflow_wdl.replace("String? middle", "String? middle Int? age"))
doc2.typecheck()
self.assertNotEqual(doc.workflow.digest, doc2.workflow.digest)
doc2 = WDL.parse_document(self.test_workflow_wdl.replace('"Hello"', '"Hi"'))
doc2.typecheck()
self.assertNotEqual(doc.workflow.digest, doc2.workflow.digest)
doc2 = WDL.parse_document(self.test_workflow_wdl.replace('i = 0', 'i = 1'))
doc2.typecheck()
self.assertEqual(doc.workflow.digest, doc2.workflow.digest)
doc2 = WDL.parse_document(self.test_workflow_wdl.replace('# COMMENT', '#'))
doc2.typecheck()
self.assertEqual(doc.workflow.digest, doc2.workflow.digest)
doc2 = WDL.parse_document(self.test_workflow_wdl.replace('# COMMENT', '\n\n'))
doc2.typecheck()
self.assertEqual(doc.workflow.digest, doc2.workflow.digest)
def test_workflow_cache(self):
with open(os.path.join(self._dir, "alyssa.json"), mode="w") as outfile:
print('{"first":"Alyssa","last":"Hacker"}', file=outfile)
with open(os.path.join(self._dir, "ben.json"), mode="w") as outfile:
print('{"first":"Ben","last":"Bitdiddle"}', file=outfile)
inp = {"people_json": [os.path.join(self._dir, "alyssa.json"), os.path.join(self._dir, "ben.json")]}
rundir1, outp = self._run(self.test_workflow_wdl, inp, cfg=self.cfg)
wmock = MagicMock(side_effect=WDL.runtime.workflow._workflow_main_loop)
tmock = MagicMock(side_effect=WDL.runtime.task._try_task)
with patch('WDL.runtime.workflow._workflow_main_loop', wmock), patch('WDL.runtime.task._try_task', tmock):
# control
rundir2, outp2 = self._run(self.test_workflow_wdl, inp, cfg=self.cfg)
self.assertEqual(wmock.call_count, 0)
self.assertEqual(tmock.call_count, 0)
outp_inodes = set()
WDL.Value.rewrite_env_paths(outp, lambda p: outp_inodes.add(os.stat(p.value)[stat.ST_INO]))
outp2_inodes = set()
WDL.Value.rewrite_env_paths(outp2, lambda p: outp2_inodes.add(os.stat(p.value)[stat.ST_INO]))
self.assertEqual(outp_inodes, outp2_inodes)
with open(os.path.join(rundir1, "outputs.json")) as outputs1:
with open(os.path.join(rundir2, "outputs.json")) as outputs2:
assert outputs1.read() == outputs2.read()
# touch a file & check cache invalidated
with open(os.path.join(self._dir, "alyssa.json"), mode="w") as outfile:
print('{"first":"Alyssa","last":"Hacker","middle":"P"}', file=outfile)
_, outp2 = self._run(self.test_workflow_wdl, inp, cfg=self.cfg)
self.assertEqual(wmock.call_count, 1)
self.assertEqual(tmock.call_count, 3) # reran Alyssa, cached Ben
self.assertNotEqual(WDL.values_to_json(outp), WDL.values_to_json(outp2))
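
# A layout sketch (not part of the original tests) of the call-cache entry path that
# test_task_input_cache_matches_output and test_get_cache_return_value_matches_outputs
# open above: one JSON file of task outputs per (task name, task digest, input digest).
import os

def cache_entry_path(cache_dir: str, task_name: str, task_digest: str, input_digest: str) -> str:
    # mirrors the path constructed in the tests: <cache dir>/<task name>/<task digest>/<input digest>.json
    return os.path.join(cache_dir, task_name, task_digest, f"{input_digest}.json")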

blob_id: 66b53fb024b28f14d870a6e7e08a242853a78374 | directory_id: 96dcea595e7c16cec07b3f649afd65f3660a0bad
path: /homeassistant/components/dsmr/sensor.py | content_id: e4f9d0e9ab9cd88aba2251ce67cdaa56ba26a3ca
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: home-assistant/core
snapshot_id: 3455eac2e9d925c92d30178643b1aaccf3a6484f | revision_id: 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | branch_name: refs/heads/dev
visit_date: 2023-08-31T15:41:06.299469 | revision_date: 2023-08-31T14:50:53 | committer_date: 2023-08-31T14:50:53
github_id: 12,888,993 | star_events_count: 35,501 | fork_events_count: 20,617
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T21:50:15 | gha_created_at: 2013-09-17T07:29:48 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 24,843 | extension: py | filename: sensor.py
content:
"""Support for Dutch Smart Meter (also known as Smartmeter or P1 port)."""
from __future__ import annotations
import asyncio
from asyncio import CancelledError
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from functools import partial
from dsmr_parser import obis_references
from dsmr_parser.clients.protocol import create_dsmr_reader, create_tcp_dsmr_reader
from dsmr_parser.clients.rfxtrx_protocol import (
create_rfxtrx_dsmr_reader,
create_rfxtrx_tcp_dsmr_reader,
)
from dsmr_parser.objects import DSMRObject
import serial
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
EntityCategory,
UnitOfEnergy,
UnitOfVolume,
)
from homeassistant.core import CoreState, Event, HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import Throttle
from .const import (
CONF_DSMR_VERSION,
CONF_PRECISION,
CONF_PROTOCOL,
CONF_RECONNECT_INTERVAL,
CONF_SERIAL_ID,
CONF_SERIAL_ID_GAS,
CONF_TIME_BETWEEN_UPDATE,
DATA_TASK,
DEFAULT_PRECISION,
DEFAULT_RECONNECT_INTERVAL,
DEFAULT_TIME_BETWEEN_UPDATE,
DEVICE_NAME_ELECTRICITY,
DEVICE_NAME_GAS,
DOMAIN,
DSMR_PROTOCOL,
LOGGER,
)
UNIT_CONVERSION = {"m3": UnitOfVolume.CUBIC_METERS}
@dataclass
class DSMRSensorEntityDescriptionMixin:
"""Mixin for required keys."""
obis_reference: str
@dataclass
class DSMRSensorEntityDescription(
SensorEntityDescription, DSMRSensorEntityDescriptionMixin
):
"""Represents an DSMR Sensor."""
dsmr_versions: set[str] | None = None
is_gas: bool = False
SENSORS: tuple[DSMRSensorEntityDescription, ...] = (
DSMRSensorEntityDescription(
key="current_electricity_usage",
translation_key="current_electricity_usage",
obis_reference=obis_references.CURRENT_ELECTRICITY_USAGE,
device_class=SensorDeviceClass.POWER,
force_update=True,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="current_electricity_delivery",
translation_key="current_electricity_delivery",
obis_reference=obis_references.CURRENT_ELECTRICITY_DELIVERY,
device_class=SensorDeviceClass.POWER,
force_update=True,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="electricity_active_tariff",
translation_key="electricity_active_tariff",
obis_reference=obis_references.ELECTRICITY_ACTIVE_TARIFF,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
device_class=SensorDeviceClass.ENUM,
options=["low", "normal"],
icon="mdi:flash",
),
DSMRSensorEntityDescription(
key="electricity_used_tariff_1",
translation_key="electricity_used_tariff_1",
obis_reference=obis_references.ELECTRICITY_USED_TARIFF_1,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
device_class=SensorDeviceClass.ENERGY,
force_update=True,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="electricity_used_tariff_2",
translation_key="electricity_used_tariff_2",
obis_reference=obis_references.ELECTRICITY_USED_TARIFF_2,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
force_update=True,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="electricity_delivered_tariff_1",
translation_key="electricity_delivered_tariff_1",
obis_reference=obis_references.ELECTRICITY_DELIVERED_TARIFF_1,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
force_update=True,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="electricity_delivered_tariff_2",
translation_key="electricity_delivered_tariff_2",
obis_reference=obis_references.ELECTRICITY_DELIVERED_TARIFF_2,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
force_update=True,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="instantaneous_active_power_l1_positive",
translation_key="instantaneous_active_power_l1_positive",
obis_reference=obis_references.INSTANTANEOUS_ACTIVE_POWER_L1_POSITIVE,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="instantaneous_active_power_l2_positive",
translation_key="instantaneous_active_power_l2_positive",
obis_reference=obis_references.INSTANTANEOUS_ACTIVE_POWER_L2_POSITIVE,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="instantaneous_active_power_l3_positive",
translation_key="instantaneous_active_power_l3_positive",
obis_reference=obis_references.INSTANTANEOUS_ACTIVE_POWER_L3_POSITIVE,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="instantaneous_active_power_l1_negative",
translation_key="instantaneous_active_power_l1_negative",
obis_reference=obis_references.INSTANTANEOUS_ACTIVE_POWER_L1_NEGATIVE,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="instantaneous_active_power_l2_negative",
translation_key="instantaneous_active_power_l2_negative",
obis_reference=obis_references.INSTANTANEOUS_ACTIVE_POWER_L2_NEGATIVE,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="instantaneous_active_power_l3_negative",
translation_key="instantaneous_active_power_l3_negative",
obis_reference=obis_references.INSTANTANEOUS_ACTIVE_POWER_L3_NEGATIVE,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
),
DSMRSensorEntityDescription(
key="short_power_failure_count",
translation_key="short_power_failure_count",
obis_reference=obis_references.SHORT_POWER_FAILURE_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
icon="mdi:flash-off",
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="long_power_failure_count",
translation_key="long_power_failure_count",
obis_reference=obis_references.LONG_POWER_FAILURE_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
icon="mdi:flash-off",
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="voltage_sag_l1_count",
translation_key="voltage_sag_l1_count",
obis_reference=obis_references.VOLTAGE_SAG_L1_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="voltage_sag_l2_count",
translation_key="voltage_sag_l2_count",
obis_reference=obis_references.VOLTAGE_SAG_L2_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="voltage_sag_l3_count",
translation_key="voltage_sag_l3_count",
obis_reference=obis_references.VOLTAGE_SAG_L3_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="voltage_swell_l1_count",
translation_key="voltage_swell_l1_count",
obis_reference=obis_references.VOLTAGE_SWELL_L1_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
icon="mdi:pulse",
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="voltage_swell_l2_count",
translation_key="voltage_swell_l2_count",
obis_reference=obis_references.VOLTAGE_SWELL_L2_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
icon="mdi:pulse",
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="voltage_swell_l3_count",
translation_key="voltage_swell_l3_count",
obis_reference=obis_references.VOLTAGE_SWELL_L3_COUNT,
dsmr_versions={"2.2", "4", "5", "5B", "5L"},
entity_registry_enabled_default=False,
icon="mdi:pulse",
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="instantaneous_voltage_l1",
translation_key="instantaneous_voltage_l1",
obis_reference=obis_references.INSTANTANEOUS_VOLTAGE_L1,
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="instantaneous_voltage_l2",
translation_key="instantaneous_voltage_l2",
obis_reference=obis_references.INSTANTANEOUS_VOLTAGE_L2,
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="instantaneous_voltage_l3",
translation_key="instantaneous_voltage_l3",
obis_reference=obis_references.INSTANTANEOUS_VOLTAGE_L3,
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="instantaneous_current_l1",
translation_key="instantaneous_current_l1",
obis_reference=obis_references.INSTANTANEOUS_CURRENT_L1,
device_class=SensorDeviceClass.CURRENT,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="instantaneous_current_l2",
translation_key="instantaneous_current_l2",
obis_reference=obis_references.INSTANTANEOUS_CURRENT_L2,
device_class=SensorDeviceClass.CURRENT,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="instantaneous_current_l3",
translation_key="instantaneous_current_l3",
obis_reference=obis_references.INSTANTANEOUS_CURRENT_L3,
device_class=SensorDeviceClass.CURRENT,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="belgium_max_power_per_phase",
translation_key="max_power_per_phase",
obis_reference=obis_references.BELGIUM_MAX_POWER_PER_PHASE,
dsmr_versions={"5B"},
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="belgium_max_current_per_phase",
translation_key="max_current_per_phase",
obis_reference=obis_references.BELGIUM_MAX_CURRENT_PER_PHASE,
dsmr_versions={"5B"},
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
DSMRSensorEntityDescription(
key="electricity_imported_total",
translation_key="electricity_imported_total",
obis_reference=obis_references.ELECTRICITY_IMPORTED_TOTAL,
dsmr_versions={"5L", "5S", "Q3D"},
force_update=True,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="electricity_exported_total",
translation_key="electricity_exported_total",
obis_reference=obis_references.ELECTRICITY_EXPORTED_TOTAL,
dsmr_versions={"5L", "5S", "Q3D"},
force_update=True,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="hourly_gas_meter_reading",
translation_key="gas_meter_reading",
obis_reference=obis_references.HOURLY_GAS_METER_READING,
dsmr_versions={"4", "5", "5L"},
is_gas=True,
force_update=True,
device_class=SensorDeviceClass.GAS,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="belgium_5min_gas_meter_reading",
translation_key="gas_meter_reading",
obis_reference=obis_references.BELGIUM_5MIN_GAS_METER_READING,
dsmr_versions={"5B"},
is_gas=True,
force_update=True,
device_class=SensorDeviceClass.GAS,
state_class=SensorStateClass.TOTAL_INCREASING,
),
DSMRSensorEntityDescription(
key="gas_meter_reading",
translation_key="gas_meter_reading",
obis_reference=obis_references.GAS_METER_READING,
dsmr_versions={"2.2"},
is_gas=True,
force_update=True,
device_class=SensorDeviceClass.GAS,
state_class=SensorStateClass.TOTAL_INCREASING,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the DSMR sensor."""
dsmr_version = entry.data[CONF_DSMR_VERSION]
entities = [
DSMREntity(description, entry)
for description in SENSORS
if (
description.dsmr_versions is None
or dsmr_version in description.dsmr_versions
)
and (not description.is_gas or CONF_SERIAL_ID_GAS in entry.data)
]
async_add_entities(entities)
min_time_between_updates = timedelta(
seconds=entry.options.get(CONF_TIME_BETWEEN_UPDATE, DEFAULT_TIME_BETWEEN_UPDATE)
)
@Throttle(min_time_between_updates)
def update_entities_telegram(telegram: dict[str, DSMRObject] | None) -> None:
"""Update entities with latest telegram and trigger state update."""
# Make all device entities aware of new telegram
for entity in entities:
entity.update_data(telegram)
# Creates an asyncio.Protocol factory for reading DSMR telegrams from
# serial and calls update_entities_telegram to update entities on arrival
protocol = entry.data.get(CONF_PROTOCOL, DSMR_PROTOCOL)
if CONF_HOST in entry.data:
if protocol == DSMR_PROTOCOL:
create_reader = create_tcp_dsmr_reader
else:
create_reader = create_rfxtrx_tcp_dsmr_reader
reader_factory = partial(
create_reader,
entry.data[CONF_HOST],
entry.data[CONF_PORT],
dsmr_version,
update_entities_telegram,
loop=hass.loop,
keep_alive_interval=60,
)
else:
if protocol == DSMR_PROTOCOL:
create_reader = create_dsmr_reader
else:
create_reader = create_rfxtrx_dsmr_reader
reader_factory = partial(
create_reader,
entry.data[CONF_PORT],
dsmr_version,
update_entities_telegram,
loop=hass.loop,
)
async def connect_and_reconnect() -> None:
"""Connect to DSMR and keep reconnecting until Home Assistant stops."""
stop_listener = None
transport = None
protocol = None
while hass.state == CoreState.not_running or hass.is_running:
# Start DSMR asyncio.Protocol reader
# Reflect connected state in devices state by setting an
# empty telegram resulting in `unknown` states
update_entities_telegram({})
try:
transport, protocol = await hass.loop.create_task(reader_factory())
if transport:
# Register listener to close transport on HA shutdown
@callback
def close_transport(_event: Event) -> None:
"""Close the transport on HA shutdown."""
if not transport: # noqa: B023
return
transport.close() # noqa: B023
stop_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, close_transport
)
# Wait for reader to close
await protocol.wait_closed()
# Unexpected disconnect
if hass.state == CoreState.not_running or hass.is_running:
stop_listener()
transport = None
protocol = None
# Reflect disconnect state in devices state by setting an
# None telegram resulting in `unavailable` states
update_entities_telegram(None)
# throttle reconnect attempts
await asyncio.sleep(
entry.data.get(CONF_RECONNECT_INTERVAL, DEFAULT_RECONNECT_INTERVAL)
)
except (serial.serialutil.SerialException, OSError):
# Log any error while establishing connection and drop to retry
# connection wait
LOGGER.exception("Error connecting to DSMR")
transport = None
protocol = None
# Reflect disconnect state in devices state by setting an
# None telegram resulting in `unavailable` states
update_entities_telegram(None)
# throttle reconnect attempts
await asyncio.sleep(
entry.data.get(CONF_RECONNECT_INTERVAL, DEFAULT_RECONNECT_INTERVAL)
)
except CancelledError:
# Reflect disconnect state in devices state by setting an
# None telegram resulting in `unavailable` states
update_entities_telegram(None)
if stop_listener and (
hass.state == CoreState.not_running or hass.is_running
):
stop_listener()
if transport:
transport.close()
if protocol:
await protocol.wait_closed()
return
# Can't be hass.async_add_job because job runs forever
task = asyncio.create_task(connect_and_reconnect())
@callback
async def _async_stop(_: Event) -> None:
task.cancel()
# Make sure task is cancelled on shutdown (or tests complete)
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop)
)
# Save the task to be able to cancel it when unloading
hass.data[DOMAIN][entry.entry_id][DATA_TASK] = task
class DSMREntity(SensorEntity):
"""Entity reading values from DSMR telegram."""
entity_description: DSMRSensorEntityDescription
_attr_has_entity_name = True
_attr_should_poll = False
def __init__(
self, entity_description: DSMRSensorEntityDescription, entry: ConfigEntry
) -> None:
"""Initialize entity."""
self.entity_description = entity_description
self._entry = entry
self.telegram: dict[str, DSMRObject] | None = {}
device_serial = entry.data[CONF_SERIAL_ID]
device_name = DEVICE_NAME_ELECTRICITY
if entity_description.is_gas:
device_serial = entry.data[CONF_SERIAL_ID_GAS]
device_name = DEVICE_NAME_GAS
if device_serial is None:
device_serial = entry.entry_id
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, device_serial)},
name=device_name,
)
self._attr_unique_id = f"{device_serial}_{entity_description.key}"
@callback
def update_data(self, telegram: dict[str, DSMRObject] | None) -> None:
"""Update data."""
self.telegram = telegram
if self.hass and (
telegram is None or self.entity_description.obis_reference in telegram
):
self.async_write_ha_state()
def get_dsmr_object_attr(self, attribute: str) -> str | None:
"""Read attribute from last received telegram for this DSMR object."""
# Make sure telegram contains an object for this entities obis
if (
self.telegram is None
or self.entity_description.obis_reference not in self.telegram
):
return None
# Get the attribute value if the object has it
dsmr_object = self.telegram[self.entity_description.obis_reference]
attr: str | None = getattr(dsmr_object, attribute)
return attr
@property
def available(self) -> bool:
"""Entity is only available if there is a telegram."""
return self.telegram is not None
@property
def device_class(self) -> SensorDeviceClass | None:
"""Return the device class of this entity."""
device_class = super().device_class
# Override device class for gas sensors providing energy units, like
# kWh, MWh, GJ, etc. In those cases, the class should be energy, not gas
with suppress(ValueError):
if device_class == SensorDeviceClass.GAS and UnitOfEnergy(
str(self.native_unit_of_measurement)
):
return SensorDeviceClass.ENERGY
return device_class
@property
def native_value(self) -> StateType:
"""Return the state of sensor, if available, translate if needed."""
value: StateType
if (value := self.get_dsmr_object_attr("value")) is None:
return None
if (
self.entity_description.obis_reference
== obis_references.ELECTRICITY_ACTIVE_TARIFF
):
return self.translate_tariff(value, self._entry.data[CONF_DSMR_VERSION])
with suppress(TypeError):
value = round(
float(value), self._entry.data.get(CONF_PRECISION, DEFAULT_PRECISION)
)
return value
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit of measurement of this entity, if any."""
unit_of_measurement = self.get_dsmr_object_attr("unit")
if unit_of_measurement in UNIT_CONVERSION:
return UNIT_CONVERSION[unit_of_measurement]
return unit_of_measurement
@staticmethod
def translate_tariff(value: str, dsmr_version: str) -> str | None:
"""Convert 2/1 to normal/low depending on DSMR version."""
# DSMR V5B: Note: In Belgium values are swapped:
# Rate code 2 is used for low rate and rate code 1 is used for normal rate.
if dsmr_version == "5B":
if value == "0001":
value = "0002"
elif value == "0002":
value = "0001"
# DSMR V2.2: Note: Rate code 1 is used for low rate and rate code 2 is
# used for normal rate.
if value == "0002":
return "normal"
if value == "0001":
return "low"
return None
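
# Illustration (not part of the original module) of translate_tariff as defined above,
# assuming DSMREntity has been imported from this module: rate code "0002" maps to the
# normal tariff and "0001" to the low tariff, and DSMR 5B (Belgium) swaps the two codes
# before the mapping is applied.
assert DSMREntity.translate_tariff("0002", "2.2") == "normal"
assert DSMREntity.translate_tariff("0001", "2.2") == "low"
assert DSMREntity.translate_tariff("0001", "5B") == "normal"
assert DSMREntity.translate_tariff("0002", "5B") == "low"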

blob_id: d88d0e2c8f42c3750ca43d761bc29597f39fe041 | directory_id: 908f506bf9cf61ea302c67d17170555d943f1e65
path: /dcnnt/common/jsonrpc.py | content_id: dea2e5a4a386cffffbde3feb40ee31df90a17d5c
detected_licenses: ["MIT"] | license_type: permissive | repo_name: cyanomiko/dcnnt-py
snapshot_id: 2a0ea4aaf0bbb2c448d28b64354c93b37393b078 | revision_id: 4084ea18f96d9a953315f5555fca45f26639b8be | branch_name: refs/heads/master
visit_date: 2023-08-19T17:00:51.634328 | revision_date: 2022-12-28T20:22:09 | committer_date: 2022-12-28T20:22:09
github_id: 252,572,044 | star_events_count: 147 | fork_events_count: 11
gha_license_id: MIT | gha_event_created_at: 2023-08-15T05:29:48 | gha_created_at: 2020-04-02T21:49:46 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,807 | extension: py | filename: jsonrpc.py
content:
import json
from typing import Dict, Any, Union, Optional, Callable, Iterable
class RPCObject:
"""Just group of other classes"""
__slots__ = ()
def to_dict(self) -> Dict[str, Any]:
"""Convert to JSON-RPC 2.0 dictionary"""
raise NotImplementedError
class RPCError(BaseException, RPCObject):
"""JSON-RPC 2.0 error object"""
__slots__ = 'code', 'message', 'data'
def __init__(self, code: int, message: str = 'error', data=None):
self.code, self.message, self.data = code, message, data
def __repr__(self):
return '<JSON-RPC 2.0 Error [{}]: {} - "{}">'.format(self.code, self.message, self.data)
def to_dict(self) -> Dict[str, Any]:
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(code=self.code, message=self.message)
if self.data is not None:
d['data'] = self.data
return d
def add_data(self, data):
"""Create copy of object with data added"""
return type(self).__call__(self.code, self.message, data)
@classmethod
def from_dict(cls, d: Dict[str, Any]):
"""Create RPCRequest object from dictionary"""
return cls(d['code'], d['message'], d.get('data'))
PARSE_ERROR = RPCError(-32700, 'Parse error')
INVALID_REQUEST_ERROR = RPCError(-32600, 'Invalid Request')
METHOD_NOT_FOUND_ERROR = RPCError(-32601, 'Method not found')
INVALID_PARAMS_ERROR = RPCError(-32602, 'Invalid params')
INTERNAL_ERROR = RPCError(-32603, 'Internal error')
SERVER_ERROR = RPCError(-32000, 'Server error')
class RPCRequest(RPCObject):
"""JSON-RPC 2.0 request/notification object"""
__slots__ = 'id', 'method', 'params'
def __init__(self, method: str, params: Union[tuple, list, dict], id: Optional[int] = None):
assert isinstance(method, str), '"method" MUST be str'
assert isinstance(params, (tuple, list, dict)) or params is None, '"params" MUST be tuple, list, dict or None'
assert isinstance(id, (int, str)) or id is None, '"id" MUST be int, str or None'
self.method, self.params, self.id = method, params, id
def __repr__(self):
return f'<JSON-RPC 2.0 Request [{self.id}]: {self.method}({self.params})>'
def to_dict(self) -> Dict[str, Any]:
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(jsonrpc='2.0', method=self.method, params=self.params)
if self.id is not None:
d['id'] = self.id
return d
@classmethod
def from_dict(cls, d: Dict[str, Any]):
"""Create RPCRequest object from dictionary"""
try:
return cls(d['method'], d['params'], d.get('id'))
except (KeyError, AssertionError) as e:
raise INVALID_REQUEST_ERROR.add_data('{}: {}'.format(type(e), str(e)))
class RPCResponse(RPCObject):
"""JSON-RPC 2.0 response object"""
__slots__ = 'id', 'result', 'error'
def __init__(self, id: int, result: Any):
assert isinstance(id, (int, str)) or id is None, '"id" MUST be int, str or None'
self.id = id
if isinstance(result, RPCError):
self.error, self.result = result, None
else:
self.error, self.result = None, result
def __repr__(self):
return f'<JSON-RPC 2.0 Request [{self.id}]: {self.error if self.result is None else self.result}>'
def to_dict(self) -> Dict[str, Any]:
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(jsonrpc='2.0', id=self.id)
if self.error is not None:
d['error'] = self.error.to_dict()
if self.result is not None:
d['result'] = self.result
return d
@classmethod
def from_dict(cls, d: Dict[str, Any]):
"""Create RPCRequest object from dictionary"""
try:
result = d.get('result')
error = d.get('error')
if (result is not None and error is not None) or (result is None and error is None):
raise INVALID_REQUEST_ERROR.add_data('MUST contain result XOR error')
return cls(d['id'], result if error is None else RPCError.from_dict(error))
except (KeyError, AssertionError) as e:
raise INVALID_REQUEST_ERROR.add_data('{}: {}'.format(type(e), str(e)))
# ToDo: Rewrite dispatcher and serializer and use them in base plugin class
class RPCDispatcher:
"""Get decoded requests and return results (success or error) from corresponding methods"""
def __init__(self, methods: Union[Dict[str, Callable], Iterable[Callable]]):
self.methods: Dict[str, Callable] = methods if isinstance(methods, dict) \
else {func.__name__: func for func in methods}
def dispatch(self, request: RPCRequest):
"""Check if request is correct, execute RPC method and return response"""
func = self.methods.get(request.method)
if func is None:
return None if request.id is None else RPCResponse(request.id, METHOD_NOT_FOUND_ERROR)
else:
try:
result = func(**request.params) if isinstance(request.params, dict) else func(*request.params)
return None if request.id is None else RPCResponse(request.id, result)
except TypeError as e:
return None if request.id is None else RPCResponse(request.id, INVALID_PARAMS_ERROR.add_data(str(e)))
except BaseException as e:
return None if request.id is None else RPCResponse(request.id, INTERNAL_ERROR.add_data(str(e)))
class RPCSerializer:
"""Methods to serialize and deserialize JSON-RPC objects"""
def __init__(self, ensure_ascii=True, length_bytes=None, order='big', separator=b''):
self.ensure_ascii, self.length_bytes, self.separator, self.order = ensure_ascii, length_bytes, separator, order
def to_bytes(self, obj):
"""Serialize JSON-RPC object to bytes"""
try:
return json.dumps(obj.to_dict(), separators=(',', ':'), ensure_ascii=self.ensure_ascii).encode()
except BaseException:
packed = json.dumps(RPCResponse(obj.id, INTERNAL_ERROR),
separators=(',', ':'), ensure_ascii=self.ensure_ascii).encode()
return b''.join((len(packed).to_bytes(self.length_bytes, self.order) if self.length_bytes else b'',
packed, self.separator))
def from_bytes(self, raw):
"""Extract JSON-RPC objects from byte string"""
res = list()
try:
data = json.loads(raw.decode())
if not isinstance(data, list):
data = (data, )
            for d in data:
                try:
                    if not isinstance(d, dict):
                        res.append(INVALID_REQUEST_ERROR.add_data('Not object'))
                        continue
                    if 'jsonrpc' not in d:
                        res.append(INVALID_REQUEST_ERROR.add_data('No "jsonrpc" key'))
                        continue
                    if d['jsonrpc'] != '2.0':
                        res.append(INVALID_REQUEST_ERROR.add_data('JSON-RPC version != 2.0'))
                        continue
                    if 'method' in d:
                        res.append(RPCRequest.from_dict(d))
                    elif 'result' in d or 'error' in d:
                        res.append(RPCResponse.from_dict(d))
                    else:
                        res.append(INVALID_REQUEST_ERROR.add_data('Not request or response'))
                except RPCError as e:
                    res.append(e)
                except BaseException:
                    res.append(SERVER_ERROR)
except json.JSONDecodeError:
res.append(PARSE_ERROR.add_data('JSON error'))
except UnicodeDecodeError:
res.append(PARSE_ERROR.add_data('UTF-8 error'))
except BaseException:
res.append(SERVER_ERROR)
return res
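

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal round trip through RPCSerializer and RPCDispatcher using the
# classes defined above; the "add" method and its payload are hypothetical.
if __name__ == '__main__':
    def add(a, b):
        return a + b

    dispatcher = RPCDispatcher([add])
    serializer = RPCSerializer()
    raw = b'{"jsonrpc": "2.0", "method": "add", "params": [2, 3], "id": 1}'
    for obj in serializer.from_bytes(raw):
        if isinstance(obj, RPCRequest):
            response = dispatcher.dispatch(obj)
            if response is not None:
                # Prints the serialized JSON-RPC response carrying result 5
                print(serializer.to_bytes(response))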
|
2b49fb9d01a6a049fc2878157fc0a3219978938a
|
0869d7edac80e8aebe951682a2cc311a083eade3
|
/Python/example_controllers/obi/cloth_volume.py
|
c356289c5d7d3d363ab69509b0dd6a5c3f93c42e
|
[
"BSD-2-Clause"
] |
permissive
|
threedworld-mit/tdw
|
7d5b4453832647733ff91ad7a7ce7ec2320454c1
|
9df96fba455b327bb360d8dd5886d8754046c690
|
refs/heads/master
| 2023-09-01T11:45:28.132298
| 2023-08-31T16:13:30
| 2023-08-31T16:13:30
| 245,492,977
| 427
| 75
|
BSD-2-Clause
| 2023-09-14T17:36:12
| 2020-03-06T18:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
cloth_volume.py
|
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.add_ons.obi import Obi
from tdw.add_ons.third_person_camera import ThirdPersonCamera
from tdw.obi_data.cloth.volume_type import ClothVolumeType
"""
Minimal example of adding a cloth volume to a scene.
"""
c = Controller()
cloth_id = Controller.get_unique_id()
camera = ThirdPersonCamera(position={"x": -3.75, "y": 1.5, "z": -0.5},
look_at={"x": 0, "y": 0.5, "z": 0})
obi = Obi()
c.add_ons.extend([camera, obi])
# Add the cloth volume.
obi.create_cloth_volume(cloth_material="canvas",
object_id=cloth_id,
position={"x": 0, "y": 1.0, "z": 0},
rotation={"x": 0, "y": 0, "z": 0},
volume_type=ClothVolumeType.sphere,
scale_factor={"x": 0.5, "y": 0.5, "z": 0.5},
pressure=3.0,
solver_id=0)
c.communicate(TDWUtils.create_empty_room(12, 12))
for i in range(200):
c.communicate([])
c.communicate({"$type": "terminate"})
|
67037eaeacf52262a4eb862eda3f9eb17de6e68d
|
5d55e0885bacd718588f25b71675c1127c93fc0a
|
/river/drift/binary/ddm.py
|
24706d61373318cab71fd58460a703c577517967
|
[
"BSD-3-Clause"
] |
permissive
|
online-ml/river
|
5698b60e65493eba28d0c0c1992f19eb996c0bfa
|
c658393084ed4147a782daa6bcd4a467c3abb0cb
|
refs/heads/main
| 2023-09-03T00:12:55.121301
| 2023-08-29T12:04:20
| 2023-08-29T12:04:20
| 167,388,434
| 3,372
| 389
|
BSD-3-Clause
| 2023-09-12T08:11:15
| 2019-01-24T15:18:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,963
|
py
|
ddm.py
|
from __future__ import annotations
import math
from river import base, stats
class DDM(base.BinaryDriftAndWarningDetector):
"""Drift Detection Method.
DDM (Drift Detection Method) is a concept change detection method
based on the PAC learning model premise, that the learner's error rate
will decrease as the number of analysed samples increase, as long as the
data distribution is stationary.
If the algorithm detects an increase in the error rate, that surpasses
a calculated threshold, either change is detected or the algorithm will
warn the user that change may occur in the near future, which is called
the warning zone.
The detection threshold is calculated in function of two statistics,
obtained when $(p_i + s_i)$ is minimum:
* $p_{min}$: The minimum recorded error rate.
* $s_{min}$: The minimum recorded standard deviation.
At instant $i$, the detection algorithm uses:
* $p_i$: The error rate at instant $i$.
* $s_i$: The standard deviation at instant $i$.
The conditions for entering the warning zone and detecting change are
as follows [see implementation note below]:
* if $p_i + s_i \\geq p_{min} + w_l * s_{min}$ -> Warning zone
* if $p_i + s_i \\geq p_{min} + d_l * s_{min}$ -> Change detected
In the above expressions, $w_l$ and $d_l$ represent, respectively, the warning and drift thresholds.
**Input:** `x` is an entry in a stream of bits, where 1 indicates error/failure and 0
represents correct/normal values.
For example, if a classifier's prediction $y'$ is right or wrong w.r.t. the
true target label $y$:
- 0: Correct, $y=y'$
- 1: Error, $y \\neq y'$
Parameters
----------
warm_start
The minimum required number of analyzed samples so change can be detected. Warm start parameter
for the drift detector.
warning_threshold
Threshold to decide if the detector is in a warning zone. The default value gives 95\\% of
confidence level to the warning assessment.
drift_threshold
Threshold to decide if a drift was detected. The default value gives a 99\\% of confidence
level to the drift assessment.
Examples
--------
>>> import random
>>> from river import drift
>>> rng = random.Random(42)
>>> ddm = drift.binary.DDM()
>>> # Simulate a data stream where the first 1000 instances come from a uniform distribution
>>> # of 1's and 0's
>>> data_stream = rng.choices([0, 1], k=1000)
>>> # Increase the probability of 1's appearing in the next 1000 instances
>>> data_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])
>>> print_warning = True
>>> # Update drift detector and verify if change is detected
>>> for i, x in enumerate(data_stream):
... _ = ddm.update(x)
... if ddm.warning_detected and print_warning:
... print(f"Warning detected at index {i}")
... print_warning = False
... if ddm.drift_detected:
... print(f"Change detected at index {i}")
... print_warning = True
Warning detected at index 1084
Change detected at index 1334
Warning detected at index 1492
References
----------
[^1]: João Gama, Pedro Medas, Gladys Castillo, Pedro Pereira Rodrigues: Learning with Drift Detection. SBIA 2004: 286-295
"""
def __init__(
self, warm_start: int = 30, warning_threshold: float = 2.0, drift_threshold: float = 3.0
):
super().__init__()
self.warm_start = warm_start
self.warning_threshold = warning_threshold
self.drift_threshold = drift_threshold
self._reset()
def _reset(self):
super()._reset()
# Probability of error/failure
self._p = stats.Mean()
# Minimum values observed
self._p_min = None
self._s_min = None
# The sum of p_min and s_min, to avoid calculating it every time
self._ps_min = float("inf")
def update(self, x):
if self.drift_detected:
self._reset()
# Probability of error/failure
self._p.update(x)
p_i = self._p.get()
n = self._p.n
# Standard deviation of the error/failure: calculated using Bernoulli's properties
s_i = math.sqrt(p_i * (1 - p_i) / n)
if n > self.warm_start:
if p_i + s_i <= self._ps_min:
self._p_min = p_i
self._s_min = s_i
self._ps_min = self._p_min + self._s_min
if p_i + s_i > self._p_min + self.warning_threshold * self._s_min:
self._warning_detected = True
else:
self._warning_detected = False
if p_i + s_i > self._p_min + self.drift_threshold * self._s_min:
self._drift_detected = True
self._warning_detected = False
return self
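

# Illustrative arithmetic for the warning/drift conditions described in the
# class docstring (not part of the river source): with p_min = 0.2,
# s_min = 0.04 and the default thresholds (warning_threshold=2.0,
# drift_threshold=3.0), the warning zone starts once p_i + s_i exceeds
# 0.28 (0.2 + 2.0 * 0.04) and drift is flagged once p_i + s_i exceeds
# 0.32 (0.2 + 3.0 * 0.04).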
|
1b9bdb75106795e5786e3efdec797f709717bd93
|
0db19410e9751790af8ce4a0a9332293e379c02f
|
/demo/bottomup_demo.py
|
3d6fee7a03cc191def11924be362a63c64b7dfff
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpose
|
2c9986521d35eee35d822fb255e8e68486026d94
|
537bd8e543ab463fb55120d5caaa1ae22d6aaf06
|
refs/heads/main
| 2023-08-30T19:44:21.349410
| 2023-07-04T13:18:22
| 2023-07-04T13:18:22
| 278,003,645
| 4,037
| 1,171
|
Apache-2.0
| 2023-09-14T09:44:55
| 2020-07-08T06:02:55
|
Python
|
UTF-8
|
Python
| false
| false
| 6,863
|
py
|
bottomup_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mimetypes
import os
import time
from argparse import ArgumentParser
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmpose.apis import inference_bottomup, init_model
from mmpose.registry import VISUALIZERS
from mmpose.structures import split_instances
def process_one_image(args,
img,
pose_estimator,
visualizer=None,
show_interval=0):
"""Visualize predicted keypoints (and heatmaps) of one image."""
# inference a single image
batch_results = inference_bottomup(pose_estimator, img)
results = batch_results[0]
# show the results
if isinstance(img, str):
img = mmcv.imread(img, channel_order='rgb')
elif isinstance(img, np.ndarray):
img = mmcv.bgr2rgb(img)
if visualizer is not None:
visualizer.add_datasample(
'result',
img,
data_sample=results,
draw_gt=False,
draw_bbox=False,
draw_heatmap=args.draw_heatmap,
show_kpt_idx=args.show_kpt_idx,
show=args.show,
wait_time=show_interval,
kpt_thr=args.kpt_thr)
return results.pred_instances
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--input', type=str, default='', help='Image/Video file')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show img')
parser.add_argument(
'--output-root',
type=str,
default='',
help='root of the output img file. '
'Default not saving the visualization images.')
parser.add_argument(
'--save-predictions',
action='store_true',
default=False,
help='whether to save predicted results')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--draw-heatmap',
action='store_true',
help='Visualize the predicted heatmap')
parser.add_argument(
'--show-kpt-idx',
action='store_true',
default=False,
help='Whether to show the index of keypoints')
parser.add_argument(
'--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
parser.add_argument(
'--radius',
type=int,
default=3,
help='Keypoint radius for visualization')
parser.add_argument(
'--thickness',
type=int,
default=1,
help='Link thickness for visualization')
parser.add_argument(
'--show-interval', type=int, default=0, help='Sleep seconds per frame')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.show or (args.output_root != '')
assert args.input != ''
output_file = None
if args.output_root:
mmengine.mkdir_or_exist(args.output_root)
output_file = os.path.join(args.output_root,
os.path.basename(args.input))
if args.input == 'webcam':
output_file += '.mp4'
if args.save_predictions:
assert args.output_root != ''
args.pred_save_path = f'{args.output_root}/results_' \
f'{os.path.splitext(os.path.basename(args.input))[0]}.json'
# build the model from a config file and a checkpoint file
if args.draw_heatmap:
cfg_options = dict(model=dict(test_cfg=dict(output_heatmaps=True)))
else:
cfg_options = None
model = init_model(
args.config,
args.checkpoint,
device=args.device,
cfg_options=cfg_options)
# build visualizer
model.cfg.visualizer.radius = args.radius
model.cfg.visualizer.line_width = args.thickness
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.set_dataset_meta(model.dataset_meta)
if args.input == 'webcam':
input_type = 'webcam'
else:
input_type = mimetypes.guess_type(args.input)[0].split('/')[0]
if input_type == 'image':
# inference
pred_instances = process_one_image(
args, args.input, model, visualizer, show_interval=0)
if args.save_predictions:
pred_instances_list = split_instances(pred_instances)
if output_file:
img_vis = visualizer.get_image()
mmcv.imwrite(mmcv.rgb2bgr(img_vis), output_file)
elif input_type in ['webcam', 'video']:
if args.input == 'webcam':
cap = cv2.VideoCapture(0)
else:
cap = cv2.VideoCapture(args.input)
video_writer = None
pred_instances_list = []
frame_idx = 0
while cap.isOpened():
success, frame = cap.read()
frame_idx += 1
if not success:
break
pred_instances = process_one_image(args, frame, model, visualizer,
0.001)
if args.save_predictions:
# save prediction results
pred_instances_list.append(
dict(
frame_id=frame_idx,
instances=split_instances(pred_instances)))
# output videos
if output_file:
frame_vis = visualizer.get_image()
if video_writer is None:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# the size of the image with visualization may vary
# depending on the presence of heatmaps
video_writer = cv2.VideoWriter(
output_file,
fourcc,
25, # saved fps
(frame_vis.shape[1], frame_vis.shape[0]))
video_writer.write(mmcv.rgb2bgr(frame_vis))
# press ESC to exit
if cv2.waitKey(5) & 0xFF == 27:
break
time.sleep(args.show_interval)
if video_writer:
video_writer.release()
cap.release()
else:
args.save_predictions = False
raise ValueError(
f'file {os.path.basename(args.input)} has invalid format.')
if args.save_predictions:
with open(args.pred_save_path, 'w') as f:
json.dump(
dict(
meta_info=model.dataset_meta,
instance_info=pred_instances_list),
f,
indent='\t')
print(f'predictions have been saved at {args.pred_save_path}')
if __name__ == '__main__':
main()
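
# Example invocation (illustrative only; the config, checkpoint and input paths
# below are placeholders, not files guaranteed to ship with the repository):
#   python demo/bottomup_demo.py path/to/config.py path/to/checkpoint.pth \
#       --input path/to/video.mp4 --output-root vis_results \
#       --draw-heatmap --save-predictions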
|
c7322e7b6192863db0288ea8439dc9d9c0dddb40
|
baa2c6f22ff563d417e34692bf3345077eb8fa5f
|
/docs/autogen_shortcuts.py
|
23b47111665240eec4b70b1dd9a6db183f57c760
|
[
"BSD-3-Clause"
] |
permissive
|
ipython/ipython
|
c42ea223b6e391bb7dd39888cb959d4d5d6b21a1
|
e5103f971233fd66b558585cce7a4f52a716cd56
|
refs/heads/main
| 2023-08-30T18:27:18.436521
| 2023-08-29T12:16:00
| 2023-08-29T12:16:00
| 658,518
| 13,673
| 4,729
|
BSD-3-Clause
| 2023-09-12T20:22:09
| 2010-05-10T04:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 6,798
|
py
|
autogen_shortcuts.py
|
from dataclasses import dataclass
from inspect import getsource
from pathlib import Path
from typing import cast, List, Union
from html import escape as html_escape
import re
from prompt_toolkit.keys import KEY_ALIASES
from prompt_toolkit.key_binding import KeyBindingsBase
from prompt_toolkit.filters import Filter, Condition
from prompt_toolkit.shortcuts import PromptSession
from IPython.terminal.shortcuts import create_ipython_shortcuts, create_identifier
from IPython.terminal.shortcuts.filters import KEYBINDING_FILTERS
@dataclass
class Shortcut:
#: a sequence of keys (each element on the list corresponds to pressing one or more keys)
keys_sequence: List[str]
filter: str
@dataclass
class Handler:
description: str
identifier: str
@dataclass
class Binding:
handler: Handler
shortcut: Shortcut
class _NestedFilter(Filter):
"""Protocol reflecting non-public prompt_toolkit's `_AndList` and `_OrList`."""
filters: List[Filter]
class _Invert(Filter):
"""Protocol reflecting non-public prompt_toolkit's `_Invert`."""
filter: Filter
conjunctions_labels = {"_AndList": "&", "_OrList": "|"}
ATOMIC_CLASSES = {"Never", "Always", "Condition"}
HUMAN_NAMES_FOR_FILTERS = {
filter_: name for name, filter_ in KEYBINDING_FILTERS.items()
}
def format_filter(
filter_: Union[Filter, _NestedFilter, Condition, _Invert],
is_top_level=True,
skip=None,
) -> str:
"""Create easily readable description of the filter."""
s = filter_.__class__.__name__
if s == "Condition":
func = cast(Condition, filter_).func
if filter_ in HUMAN_NAMES_FOR_FILTERS:
return HUMAN_NAMES_FOR_FILTERS[filter_]
name = func.__name__
if name == "<lambda>":
source = getsource(func)
return source.split("=")[0].strip()
return func.__name__
elif s == "_Invert":
operand = cast(_Invert, filter_).filter
if operand.__class__.__name__ in ATOMIC_CLASSES:
return f"~{format_filter(operand, is_top_level=False)}"
return f"~({format_filter(operand, is_top_level=False)})"
elif s in conjunctions_labels:
filters = cast(_NestedFilter, filter_).filters
if filter_ in HUMAN_NAMES_FOR_FILTERS:
return HUMAN_NAMES_FOR_FILTERS[filter_]
conjunction = conjunctions_labels[s]
glue = f" {conjunction} "
result = glue.join(format_filter(x, is_top_level=False) for x in filters)
if len(filters) > 1 and not is_top_level:
result = f"({result})"
return result
elif s in ["Never", "Always"]:
return s.lower()
elif s == "PassThrough":
return "pass_through"
else:
raise ValueError(f"Unknown filter type: {filter_}")
def sentencize(s) -> str:
"""Extract first sentence"""
s = re.split(r"\.\W", s.replace("\n", " ").strip())
s = s[0] if len(s) else ""
if not s.endswith("."):
s += "."
try:
return " ".join(s.split())
except AttributeError:
return s
class _DummyTerminal:
"""Used as a buffer to get prompt_toolkit bindings
"""
handle_return = None
input_transformer_manager = None
display_completions = None
editing_mode = "emacs"
auto_suggest = None
def bindings_from_prompt_toolkit(prompt_bindings: KeyBindingsBase) -> List[Binding]:
"""Collect bindings to a simple format that does not depend on prompt-toolkit internals"""
bindings: List[Binding] = []
for kb in prompt_bindings.bindings:
bindings.append(
Binding(
handler=Handler(
description=kb.handler.__doc__ or "",
identifier=create_identifier(kb.handler),
),
shortcut=Shortcut(
keys_sequence=[
str(k.value) if hasattr(k, "value") else k for k in kb.keys
],
filter=format_filter(kb.filter, skip={"has_focus_filter"}),
),
)
)
return bindings
INDISTINGUISHABLE_KEYS = {**KEY_ALIASES, **{v: k for k, v in KEY_ALIASES.items()}}
def format_prompt_keys(keys: str, add_alternatives=True) -> str:
"""Format prompt toolkit key with modifier into an RST representation."""
def to_rst(key):
escaped = key.replace("\\", "\\\\")
return f":kbd:`{escaped}`"
keys_to_press: List[str]
prefixes = {
"c-s-": [to_rst("ctrl"), to_rst("shift")],
"s-c-": [to_rst("ctrl"), to_rst("shift")],
"c-": [to_rst("ctrl")],
"s-": [to_rst("shift")],
}
for prefix, modifiers in prefixes.items():
if keys.startswith(prefix):
remainder = keys[len(prefix) :]
keys_to_press = [*modifiers, to_rst(remainder)]
break
else:
keys_to_press = [to_rst(keys)]
result = " + ".join(keys_to_press)
if keys in INDISTINGUISHABLE_KEYS and add_alternatives:
alternative = INDISTINGUISHABLE_KEYS[keys]
result = (
result
+ " (or "
+ format_prompt_keys(alternative, add_alternatives=False)
+ ")"
)
return result
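
# Illustrative outputs of the helper above (given the prefix table it defines;
# the key strings are examples, not an exhaustive list):
#   format_prompt_keys("c-x")    -> ":kbd:`ctrl` + :kbd:`x`"
#   format_prompt_keys("escape") -> ":kbd:`escape`"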
if __name__ == '__main__':
here = Path(__file__).parent
dest = here / "source" / "config" / "shortcuts"
ipy_bindings = create_ipython_shortcuts(_DummyTerminal())
session = PromptSession(key_bindings=ipy_bindings)
prompt_bindings = session.app.key_bindings
assert prompt_bindings
# Ensure that we collected the default shortcuts
assert len(prompt_bindings.bindings) > len(ipy_bindings.bindings)
bindings = bindings_from_prompt_toolkit(prompt_bindings)
def sort_key(binding: Binding):
return binding.handler.identifier, binding.shortcut.filter
filters = []
with (dest / "table.tsv").open("w", encoding="utf-8") as csv:
for binding in sorted(bindings, key=sort_key):
sequence = ", ".join(
[format_prompt_keys(keys) for keys in binding.shortcut.keys_sequence]
)
if binding.shortcut.filter == "always":
condition_label = "-"
else:
# we cannot fit all the columns as the filters got too complex over time
condition_label = "ⓘ"
csv.write(
"\t".join(
[
sequence,
sentencize(binding.handler.description)
+ f" :raw-html:`<br>` `{binding.handler.identifier}`",
f':raw-html:`<span title="{html_escape(binding.shortcut.filter)}" style="cursor: help">{condition_label}</span>`',
]
)
+ "\n"
)
|
9e6fbeb91b9b96eafcfe80a1899fbd573527781a
|
40e8515dba964d51660f72bb497ac7fbb3246b2c
|
/seleniumwire/har.py
|
506af8f2eb56a5432661b4372daf3df3bf256231
|
[
"MIT"
] |
permissive
|
wkeeling/selenium-wire
|
47e4caf4ed114aa274d428736779277716b3bada
|
79fa2e81e690b9c32882d006a6799c3a316fe346
|
refs/heads/master
| 2023-08-07T19:45:35.911730
| 2022-11-05T15:54:47
| 2022-11-05T15:54:47
| 137,925,467
| 1,625
| 232
|
MIT
| 2023-07-31T02:40:30
| 2018-06-19T17:43:24
|
Python
|
UTF-8
|
Python
| false
| false
| 6,661
|
py
|
har.py
|
"""Handles generation of HAR archives.
This code has been taken from the har_dump.py addon in the mitmproxy project.
"""
import base64
import json
from datetime import datetime, timezone
from typing import List, Set
import seleniumwire
from seleniumwire.thirdparty.mitmproxy import connections
from seleniumwire.thirdparty.mitmproxy.http import HTTPFlow
from seleniumwire.thirdparty.mitmproxy.net.http import cookies
from seleniumwire.thirdparty.mitmproxy.utils import strutils
# A set of servers seen so far is maintained so we can avoid
# using 'connect' time for entries that reuse an existing connection.
SERVERS_SEEN: Set[connections.ServerConnection] = set()
def create_har_entry(flow: HTTPFlow) -> dict:
"""Create a HAR entry from the supplied flow.
Args:
flow: The current flow.
Returns: The HAR entry as a dictionary.
"""
# -1 indicates that these values do not apply to current request
ssl_time = -1
connect_time = -1
if flow.server_conn and flow.server_conn not in SERVERS_SEEN:
connect_time = flow.server_conn.timestamp_tcp_setup - flow.server_conn.timestamp_start
if flow.server_conn.timestamp_tls_setup is not None:
ssl_time = flow.server_conn.timestamp_tls_setup - flow.server_conn.timestamp_tcp_setup
SERVERS_SEEN.add(flow.server_conn)
# Calculate raw timings from timestamps. DNS timings can not be calculated
# for lack of a way to measure it. The same goes for HAR blocked.
# mitmproxy will open a server connection as soon as it receives the host
# and port from the client connection. So, the time spent waiting is actually
# spent waiting between request.timestamp_end and response.timestamp_start
# thus it correlates to HAR wait instead.
timings_raw = {
'send': flow.request.timestamp_end - flow.request.timestamp_start,
'receive': flow.response.timestamp_end - flow.response.timestamp_start,
'wait': flow.response.timestamp_start - flow.request.timestamp_end,
'connect': connect_time,
'ssl': ssl_time,
}
# HAR timings are integers in ms, so we re-encode the raw timings to that format.
timings = {k: int(1000 * v) if v != -1 else -1 for k, v in timings_raw.items()}
# full_time is the sum of all timings.
# Timings set to -1 will be ignored as per spec.
full_time = sum(v for v in timings.values() if v > -1)
started_date_time = datetime.fromtimestamp(flow.request.timestamp_start, timezone.utc).isoformat()
# Response body size and encoding
response_body_size = len(flow.response.raw_content) if flow.response.raw_content else 0
response_body_decoded_size = len(flow.response.content) if flow.response.content else 0
response_body_compression = response_body_decoded_size - response_body_size
entry = {
"startedDateTime": started_date_time,
"time": full_time,
"request": {
"method": flow.request.method,
"url": flow.request.url,
"httpVersion": flow.request.http_version,
"cookies": _format_request_cookies(flow.request.cookies.fields),
"headers": _name_value(flow.request.headers),
"queryString": _name_value(flow.request.query or {}),
"headersSize": len(str(flow.request.headers)),
"bodySize": len(flow.request.content),
},
"response": {
"status": flow.response.status_code,
"statusText": flow.response.reason,
"httpVersion": flow.response.http_version,
"cookies": _format_response_cookies(flow.response.cookies.fields),
"headers": _name_value(flow.response.headers),
"content": {
"size": response_body_size,
"compression": response_body_compression,
"mimeType": flow.response.headers.get('Content-Type', ''),
},
"redirectURL": flow.response.headers.get('Location', ''),
"headersSize": len(str(flow.response.headers)),
"bodySize": response_body_size,
},
"cache": {},
"timings": timings,
}
# Store binary data as base64
if strutils.is_mostly_bin(flow.response.content):
entry["response"]["content"]["text"] = base64.b64encode(flow.response.content).decode()
entry["response"]["content"]["encoding"] = "base64"
else:
entry["response"]["content"]["text"] = flow.response.get_text(strict=False)
if flow.request.method in ["POST", "PUT", "PATCH"]:
params = [{"name": a, "value": b} for a, b in flow.request.urlencoded_form.items(multi=True)]
entry["request"]["postData"] = {
"mimeType": flow.request.headers.get("Content-Type", ""),
"text": flow.request.get_text(strict=False),
"params": params,
}
if flow.server_conn.connected():
entry["serverIPAddress"] = str(flow.server_conn.ip_address[0])
return entry
def _format_cookies(cookie_list):
rv = []
for name, value, attrs in cookie_list:
cookie_har = {
"name": name,
"value": value,
}
# HAR only needs some attributes
for key in ["path", "domain", "comment"]:
if key in attrs:
cookie_har[key] = attrs[key]
# These keys need to be boolean!
for key in ["httpOnly", "secure"]:
cookie_har[key] = bool(key in attrs)
# Expiration time needs to be formatted
expire_ts = cookies.get_expiration_ts(attrs)
if expire_ts is not None:
cookie_har["expires"] = datetime.fromtimestamp(expire_ts, timezone.utc).isoformat()
rv.append(cookie_har)
return rv
def _format_request_cookies(fields):
return _format_cookies(cookies.group_cookies(fields))
def _format_response_cookies(fields):
return _format_cookies((c[0], c[1][0], c[1][1]) for c in fields)
def _name_value(obj):
"""
Convert (key, value) pairs to HAR format.
"""
return [{"name": k, "value": v} for k, v in obj.items()]
def generate_har(entries: List[dict]) -> str:
"""Generate a HAR as a JSON formatted string.
Args:
entries: A list of HAR entries.
Returns: A JSON formatted string.
"""
har = {
"log": {
"version": "1.2",
"creator": {
"name": "Selenium Wire HAR dump",
"version": seleniumwire.__version__,
"comment": f"Selenium Wire version {seleniumwire.__version__}",
},
"entries": entries,
}
}
return json.dumps(har, indent=2)
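

# Usage sketch (illustrative, not part of the original module): entries built
# with create_har_entry() for each captured flow can be written out as a
# .har file. "captured_flows" below is a hypothetical iterable of HTTPFlow
# objects.
#
#   entries = [create_har_entry(flow) for flow in captured_flows]
#   with open("capture.har", "w") as f:
#       f.write(generate_har(entries))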
|
0ac6981f7e228636f81e8e1a7cf479b700e40572
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/test/sql/test_selectable.py
|
a146a94c6003eb7fbc63af0cbe1950b85120d673
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 130,634
|
py
|
test_selectable.py
|
"""Test various algorithmic properties of selectables."""
from itertools import zip_longest
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import delete
from sqlalchemy import exc
from sqlalchemy import exists
from sqlalchemy import false
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import insert
from sqlalchemy import Integer
from sqlalchemy import join
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import not_
from sqlalchemy import null
from sqlalchemy import or_
from sqlalchemy import outerjoin
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import true
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import union
from sqlalchemy import update
from sqlalchemy import util
from sqlalchemy.sql import Alias
from sqlalchemy.sql import annotation
from sqlalchemy.sql import base
from sqlalchemy.sql import column
from sqlalchemy.sql import elements
from sqlalchemy.sql import LABEL_STYLE_DISAMBIGUATE_ONLY
from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.sql import operators
from sqlalchemy.sql import table
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql import visitors
from sqlalchemy.sql.dml import Insert
from sqlalchemy.sql.selectable import LABEL_STYLE_NONE
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import config
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing import ne_
from sqlalchemy.testing.assertions import expect_raises_message
from sqlalchemy.testing.provision import normalize_sequence
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
Column("col3", Integer),
Column("colx", Integer),
)
table2 = Table(
"table2",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", Integer, ForeignKey("table1.col1")),
Column("col3", String(20)),
Column("coly", Integer),
)
keyed = Table(
"keyed",
metadata,
Column("x", Integer, key="colx"),
Column("y", Integer, key="coly"),
Column("z", Integer),
)
class SelectableTest(
fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL
):
__dialect__ = "default"
@testing.combinations(
(
(table1.c.col1, table1.c.col2),
[
{
"name": "col1",
"type": table1.c.col1.type,
"expr": table1.c.col1,
},
{
"name": "col2",
"type": table1.c.col2.type,
"expr": table1.c.col2,
},
],
),
(
(table1,),
[
{
"name": "col1",
"type": table1.c.col1.type,
"expr": table1.c.col1,
},
{
"name": "col2",
"type": table1.c.col2.type,
"expr": table1.c.col2,
},
{
"name": "col3",
"type": table1.c.col3.type,
"expr": table1.c.col3,
},
{
"name": "colx",
"type": table1.c.colx.type,
"expr": table1.c.colx,
},
],
),
(
(func.count(table1.c.col1),),
[
{
"name": "count",
"type": testing.eq_type_affinity(Integer),
"expr": testing.eq_clause_element(
func.count(table1.c.col1)
),
}
],
),
(
(func.count(table1.c.col1), func.count(table1.c.col2)),
[
{
"name": "count",
"type": testing.eq_type_affinity(Integer),
"expr": testing.eq_clause_element(
func.count(table1.c.col1)
),
},
{
"name": "count_1",
"type": testing.eq_type_affinity(Integer),
"expr": testing.eq_clause_element(
func.count(table1.c.col2)
),
},
],
),
)
def test_core_column_descriptions(self, cols, expected):
stmt = select(*cols)
# reverse eq_ is so eq_clause_element works
eq_(expected, stmt.column_descriptions)
@testing.combinations(insert, update, delete, argnames="dml_construct")
@testing.combinations(
(
table1,
(table1.c.col1, table1.c.col2),
{"name": "table1", "table": table1},
[
{
"name": "col1",
"type": table1.c.col1.type,
"expr": table1.c.col1,
},
{
"name": "col2",
"type": table1.c.col2.type,
"expr": table1.c.col2,
},
],
),
(
table1,
(func.count(table1.c.col1),),
{"name": "table1", "table": table1},
[
{
"name": None,
"type": testing.eq_type_affinity(Integer),
"expr": testing.eq_clause_element(
func.count(table1.c.col1)
),
},
],
),
(
table1,
None,
{"name": "table1", "table": table1},
[],
),
(
table1.alias("some_alias"),
None,
{
"name": "some_alias",
"table": testing.eq_clause_element(table1.alias("some_alias")),
},
[],
),
(
table1.join(table2),
None,
{
"name": None,
"table": testing.eq_clause_element(table1.join(table2)),
},
[],
),
argnames="entity, cols, expected_entity, expected_returning",
)
def test_dml_descriptions(
self, dml_construct, entity, cols, expected_entity, expected_returning
):
stmt = dml_construct(entity)
if cols:
stmt = stmt.returning(*cols)
eq_(stmt.entity_description, expected_entity)
eq_(expected_returning, stmt.returning_column_descriptions)
def test_indirect_correspondence_on_labels(self):
# this test depends upon 'distance' to
# get the right result
# same column three times
s = select(
table1.c.col1.label("c2"),
table1.c.col1,
table1.c.col1.label("c1"),
).subquery()
# this tests the same thing as
# test_direct_correspondence_on_labels below -
# that the presence of label() affects the 'distance'
assert s.corresponding_column(table1.c.col1) is s.c.col1
assert s.corresponding_column(s.c.col1) is s.c.col1
assert s.corresponding_column(s.c.c1) is s.c.c1
def test_labeled_select_twice(self):
scalar_select = select(table1.c.col1).label("foo")
s1 = select(scalar_select)
s2 = select(scalar_select, scalar_select)
eq_(
s1.selected_columns.foo.proxy_set,
{s1.selected_columns.foo, scalar_select, scalar_select.element},
)
eq_(
s2.selected_columns.foo.proxy_set,
{s2.selected_columns.foo, scalar_select, scalar_select.element},
)
assert (
s1.corresponding_column(scalar_select) is s1.selected_columns.foo
)
assert (
s2.corresponding_column(scalar_select) is s2.selected_columns.foo
)
def test_labeled_subquery_twice(self):
scalar_select = select(table1.c.col1).label("foo")
s1 = select(scalar_select).subquery()
s2 = select(scalar_select, scalar_select).subquery()
eq_(
s1.c.foo.proxy_set,
{s1.c.foo, scalar_select, scalar_select.element},
)
eq_(
s2.c.foo.proxy_set,
{s2.c.foo, scalar_select, scalar_select.element},
)
assert s1.corresponding_column(scalar_select) is s1.c.foo
assert s2.corresponding_column(scalar_select) is s2.c.foo
def test_labels_name_w_separate_key(self):
label = select(table1.c.col1).label("foo")
label.key = "bar"
s1 = select(label)
assert s1.corresponding_column(label) is s1.selected_columns.bar
# renders as foo
self.assert_compile(
s1, "SELECT (SELECT table1.col1 FROM table1) AS foo"
)
@testing.combinations(("cte",), ("subquery",), argnames="type_")
@testing.combinations(
("onelevel",), ("twolevel",), ("middle",), argnames="path"
)
@testing.combinations((True,), (False,), argnames="require_embedded")
def test_subquery_cte_correspondence(self, type_, require_embedded, path):
stmt = select(table1)
if type_ == "cte":
cte1 = stmt.cte()
elif type_ == "subquery":
cte1 = stmt.subquery()
if path == "onelevel":
is_(
cte1.corresponding_column(
table1.c.col1, require_embedded=require_embedded
),
cte1.c.col1,
)
elif path == "twolevel":
cte2 = cte1.alias()
is_(
cte2.corresponding_column(
table1.c.col1, require_embedded=require_embedded
),
cte2.c.col1,
)
elif path == "middle":
cte2 = cte1.alias()
is_(
cte2.corresponding_column(
cte1.c.col1, require_embedded=require_embedded
),
cte2.c.col1,
)
def test_labels_anon_w_separate_key(self):
label = select(table1.c.col1).label(None)
label.key = "bar"
s1 = select(label)
# .bar is there
assert s1.corresponding_column(label) is s1.selected_columns.bar
# renders as anon_1
self.assert_compile(
s1, "SELECT (SELECT table1.col1 FROM table1) AS anon_1"
)
def test_labels_anon_w_separate_key_subquery(self):
label = select(table1.c.col1).label(None)
label.key = label._tq_key_label = "bar"
s1 = select(label)
subq = s1.subquery()
s2 = select(subq).where(subq.c.bar > 5)
self.assert_compile(
s2,
"SELECT anon_2.anon_1 FROM (SELECT (SELECT table1.col1 "
"FROM table1) AS anon_1) AS anon_2 "
"WHERE anon_2.anon_1 > :param_1",
checkparams={"param_1": 5},
)
def test_labels_anon_generate_binds_subquery(self):
label = select(table1.c.col1).label(None)
label.key = label._tq_key_label = "bar"
s1 = select(label)
subq = s1.subquery()
s2 = select(subq).where(subq.c[0] > 5)
self.assert_compile(
s2,
"SELECT anon_2.anon_1 FROM (SELECT (SELECT table1.col1 "
"FROM table1) AS anon_1) AS anon_2 "
"WHERE anon_2.anon_1 > :param_1",
checkparams={"param_1": 5},
)
@testing.combinations((True,), (False,))
def test_broken_select_same_named_explicit_cols(self, use_anon):
"""test for #6090. the query is "wrong" and we dont know how
# to render this right now.
"""
stmt = select(
table1.c.col1,
table1.c.col2,
literal_column("col2").label(None if use_anon else "col2"),
).select_from(table1)
if use_anon:
self.assert_compile(
select(stmt.subquery()),
"SELECT anon_1.col1, anon_1.col2, anon_1.col2_1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"col2 AS col2_1 FROM table1) AS anon_1",
)
else:
# the keys here are not critical as they are not what was
# requested anyway, maybe should raise here also.
eq_(stmt.selected_columns.keys(), ["col1", "col2", "col2_1"])
with expect_raises_message(
exc.InvalidRequestError,
"Label name col2 is being renamed to an anonymous "
"label due to "
"disambiguation which is not supported right now. Please use "
"unique names for explicit labels.",
):
select(stmt.subquery()).compile()
def test_same_anon_named_explicit_cols(self):
"""test for #8569. This adjusts the change in #6090 to not apply
to anonymous labels.
"""
lc = literal_column("col2").label(None)
subq1 = select(lc).subquery()
stmt2 = select(subq1, lc).subquery()
self.assert_compile(
select(stmt2),
"SELECT anon_1.col2_1, anon_1.col2_1_1 FROM "
"(SELECT anon_2.col2_1 AS col2_1, col2 AS col2_1 FROM "
"(SELECT col2 AS col2_1) AS anon_2) AS anon_1",
)
def test_correlate_none_arg_error(self):
stmt = select(table1)
with expect_raises_message(
exc.ArgumentError,
"additional FROM objects not accepted when passing "
"None/False to correlate",
):
stmt.correlate(None, table2)
def test_correlate_except_none_arg_error(self):
stmt = select(table1)
with expect_raises_message(
exc.ArgumentError,
"additional FROM objects not accepted when passing "
"None/False to correlate_except",
):
stmt.correlate_except(None, table2)
def test_select_label_grouped_still_corresponds(self):
label = select(table1.c.col1).label("foo")
label2 = label.self_group()
s1 = select(label)
s2 = select(label2)
assert s1.corresponding_column(label) is s1.selected_columns.foo
assert s2.corresponding_column(label) is s2.selected_columns.foo
def test_subquery_label_grouped_still_corresponds(self):
label = select(table1.c.col1).label("foo")
label2 = label.self_group()
s1 = select(label).subquery()
s2 = select(label2).subquery()
assert s1.corresponding_column(label) is s1.c.foo
assert s2.corresponding_column(label) is s2.c.foo
def test_direct_correspondence_on_labels(self):
# this test depends on labels being part
# of the proxy set to get the right result
l1, l2 = table1.c.col1.label("foo"), table1.c.col1.label("bar")
sel = select(l1, l2)
sel2 = sel.alias()
assert sel2.corresponding_column(l1) is sel2.c.foo
assert sel2.corresponding_column(l2) is sel2.c.bar
sel2 = select(table1.c.col1.label("foo"), table1.c.col2.label("bar"))
sel3 = sel.union(sel2).alias()
assert sel3.corresponding_column(l1) is sel3.c.foo
assert sel3.corresponding_column(l2) is sel3.c.bar
def test_keyed_gen(self):
s = select(keyed)
eq_(s.selected_columns.colx.key, "colx")
eq_(s.selected_columns.colx.name, "x")
assert (
s.selected_columns.corresponding_column(keyed.c.colx)
is s.selected_columns.colx
)
assert (
s.selected_columns.corresponding_column(keyed.c.coly)
is s.selected_columns.coly
)
assert (
s.selected_columns.corresponding_column(keyed.c.z)
is s.selected_columns.z
)
sel2 = s.alias()
assert sel2.corresponding_column(keyed.c.colx) is sel2.c.colx
assert sel2.corresponding_column(keyed.c.coly) is sel2.c.coly
assert sel2.corresponding_column(keyed.c.z) is sel2.c.z
def test_keyed_label_gen(self):
s = select(keyed).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
assert (
s.selected_columns.corresponding_column(keyed.c.colx)
is s.selected_columns.keyed_colx
)
assert (
s.selected_columns.corresponding_column(keyed.c.coly)
is s.selected_columns.keyed_coly
)
assert (
s.selected_columns.corresponding_column(keyed.c.z)
is s.selected_columns.keyed_z
)
sel2 = s.alias()
assert sel2.corresponding_column(keyed.c.colx) is sel2.c.keyed_colx
assert sel2.corresponding_column(keyed.c.coly) is sel2.c.keyed_coly
assert sel2.corresponding_column(keyed.c.z) is sel2.c.keyed_z
def test_keyed_c_collection_upper(self):
c = Column("foo", Integer, key="bar")
t = Table("t", MetaData(), c)
is_(t.c.bar, c)
def test_keyed_c_collection_lower(self):
c = column("foo")
c.key = "bar"
t = table("t", c)
is_(t.c.bar, c)
def test_clone_c_proxy_key_upper(self):
c = Column("foo", Integer, key="bar")
t = Table("t", MetaData(), c)
s = select(t)._clone()
assert c in s.selected_columns.bar.proxy_set
s = select(t).subquery()._clone()
assert c in s.c.bar.proxy_set
def test_clone_c_proxy_key_lower(self):
c = column("foo")
c.key = "bar"
t = table("t", c)
s = select(t)._clone()
assert c in s.selected_columns.bar.proxy_set
s = select(t).subquery()._clone()
assert c in s.c.bar.proxy_set
def test_no_error_on_unsupported_expr_key(self):
from sqlalchemy.sql.expression import BinaryExpression
def myop(x, y):
pass
t = table("t", column("x"), column("y"))
expr = BinaryExpression(t.c.x, t.c.y, myop)
s = select(t, expr)
# anon_label, e.g. a truncated_label, is used here because
# the expr has no name, no key, and myop() can't create a
# string, so this is the last resort
eq_(s.selected_columns.keys(), ["x", "y", "_no_label"])
s = select(t, expr).subquery()
eq_(s.c.keys(), ["x", "y", "_no_label"])
def test_cloned_intersection(self):
t1 = table("t1", column("x"))
t2 = table("t2", column("x"))
s1 = t1.select()
s2 = t2.select()
s3 = t1.select()
s1c1 = s1._clone()
s1c2 = s1._clone()
s2c1 = s2._clone()
s3c1 = s3._clone()
eq_(base._cloned_intersection([s1c1, s3c1], [s2c1, s1c2]), {s1c1})
def test_cloned_difference(self):
t1 = table("t1", column("x"))
t2 = table("t2", column("x"))
s1 = t1.select()
s2 = t2.select()
s3 = t1.select()
s1c1 = s1._clone()
s1c2 = s1._clone()
s2c1 = s2._clone()
s3c1 = s3._clone()
eq_(
base._cloned_difference([s1c1, s2c1, s3c1], [s2c1, s1c2]),
{s3c1},
)
def test_distance_on_aliases(self):
a1 = table1.alias("a1")
for s in (
select(a1, table1)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery(),
select(table1, a1)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery(),
):
assert s.corresponding_column(table1.c.col1) is s.c.table1_col1
assert s.corresponding_column(a1.c.col1) is s.c.a1_col1
def test_join_against_self(self):
jj = select(table1.c.col1.label("bar_col1")).subquery()
jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
# test column directly against itself
# joins necessarily have to prefix column names with the name
# of the selectable, else the same-named columns will overwrite
# one another. In this case, we unfortunately have this
# unfriendly "anonymous" name, whereas before when select() could
# be a FROM the "bar_col1" label would be directly in the join()
# object. However this was a useless join() object because PG and
# MySQL don't accept unnamed subqueries in joins in any case.
name = "%s_bar_col1" % (jj.name,)
assert jjj.corresponding_column(jjj.c.table1_col1) is jjj.c.table1_col1
assert jjj.corresponding_column(jj.c.bar_col1) is jjj.c[name]
# test alias of the join
j2 = (
jjj.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery("foo")
)
assert j2.corresponding_column(table1.c.col1) is j2.c.table1_col1
def test_clone_append_column(self):
sel = select(literal_column("1").label("a"))
eq_(list(sel.selected_columns.keys()), ["a"])
cloned = visitors.ReplacingCloningVisitor().traverse(sel)
cloned.add_columns.non_generative(
cloned, literal_column("2").label("b")
)
cloned.add_columns.non_generative(cloned, func.foo())
eq_(list(cloned.selected_columns.keys()), ["a", "b", "foo"])
def test_clone_col_list_changes_then_proxy(self):
t = table("t", column("q"), column("p"))
stmt = select(t.c.q).subquery()
def add_column(stmt):
stmt.add_columns.non_generative(stmt, t.c.p)
stmt2 = visitors.cloned_traverse(stmt, {}, {"select": add_column})
eq_(list(stmt.c.keys()), ["q"])
eq_(list(stmt2.c.keys()), ["q", "p"])
def test_clone_col_list_changes_then_schema_proxy(self):
t = Table("t", MetaData(), Column("q", Integer), Column("p", Integer))
stmt = select(t.c.q).subquery()
def add_column(stmt):
stmt.add_columns.non_generative(stmt, t.c.p)
stmt2 = visitors.cloned_traverse(stmt, {}, {"select": add_column})
eq_(list(stmt.c.keys()), ["q"])
eq_(list(stmt2.c.keys()), ["q", "p"])
@testing.combinations(
func.now(), null(), true(), false(), literal_column("10"), column("x")
)
def test_const_object_correspondence(self, c):
"""test #7154"""
stmt = select(c).subquery()
stmt2 = select(stmt)
is_(
stmt2.selected_columns.corresponding_column(c),
stmt2.selected_columns[0],
)
def test_append_column_after_visitor_replace(self):
# test for a supported idiom that matches the deprecated / removed
# replace_selectable method
basesel = select(literal_column("1").label("a"))
tojoin = select(
literal_column("1").label("a"), literal_column("2").label("b")
)
basefrom = basesel.alias("basefrom")
joinfrom = tojoin.alias("joinfrom")
sel = select(basefrom.c.a)
replace_from = basefrom.join(joinfrom, basefrom.c.a == joinfrom.c.a)
def replace(elem):
if elem is basefrom:
return replace_from
replaced = visitors.replacement_traverse(sel, {}, replace)
self.assert_compile(
replaced,
"SELECT basefrom.a FROM (SELECT 1 AS a) AS basefrom "
"JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
"ON basefrom.a = joinfrom.a",
)
replaced.selected_columns
replaced.add_columns.non_generative(replaced, joinfrom.c.b)
self.assert_compile(
replaced,
"SELECT basefrom.a, joinfrom.b FROM (SELECT 1 AS a) AS basefrom "
"JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
"ON basefrom.a = joinfrom.a",
)
@testing.combinations(
("_internal_subquery",),
("selected_columns",),
("_all_selected_columns"),
)
def test_append_column_after_legacy_subq(self, attr):
"""test :ticket:`6261`"""
t1 = table("t1", column("a"), column("b"))
s1 = select(t1.c.a)
if attr == "selected_columns":
s1.selected_columns
elif attr == "_internal_subuqery":
with testing.expect_deprecated("The SelectBase.c"):
s1.c
elif attr == "_all_selected_columns":
s1._all_selected_columns
s1.add_columns.non_generative(s1, t1.c.b)
self.assert_compile(s1, "SELECT t1.a, t1.b FROM t1")
def test_against_cloned_non_table(self):
# test that corresponding column digs across
# clone boundaries with anonymous labeled elements
col = func.count().label("foo")
sel = select(col).subquery()
sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
assert sel2.corresponding_column(col) is sel2.c.foo
sel3 = visitors.ReplacingCloningVisitor().traverse(sel2)
assert sel3.corresponding_column(col) is sel3.c.foo
def test_with_only_generative(self):
s1 = table1.select().scalar_subquery()
self.assert_compile(
s1.with_only_columns(s1),
"SELECT (SELECT table1.col1, table1.col2, "
"table1.col3, table1.colx FROM table1) AS anon_1",
)
def test_reduce_cols_odd_expressions(self):
"""test util.reduce_columns() works with text, non-col expressions
in a SELECT.
found_during_type_annotation
"""
stmt = select(
table1.c.col1,
table1.c.col3 * 5,
text("some_expr"),
table2.c.col2,
func.foo(),
).join(table2)
self.assert_compile(
stmt.reduce_columns(only_synonyms=False),
"SELECT table1.col1, table1.col3 * :col3_1 AS anon_1, "
"some_expr, foo() AS foo_1 FROM table1 JOIN table2 "
"ON table1.col1 = table2.col2",
)
def test_with_only_generative_no_list(self):
s1 = table1.select().scalar_subquery()
with testing.expect_raises_message(
exc.ArgumentError,
r"The \"entities\" argument to "
r"Select.with_only_columns\(\), when referring "
"to a sequence of items, is now passed",
):
s1.with_only_columns([s1])
@testing.combinations(
(
[table1.c.col1],
[table1.join(table2)],
[table1.join(table2)],
[table1],
),
([table1], [table2], [table2, table1], [table1]),
(
[table1.c.col1, table2.c.col1],
[],
[table1, table2],
[table1, table2],
),
)
def test_froms_accessors(
self, cols_expr, select_from, exp_final_froms, exp_cc_froms
):
"""tests for #6808"""
s1 = select(*cols_expr).select_from(*select_from)
for ff, efp in zip_longest(s1.get_final_froms(), exp_final_froms):
assert ff.compare(efp)
eq_(s1.columns_clause_froms, exp_cc_froms)
def test_scalar_subquery_from_subq_same_source(self):
s1 = select(table1.c.col1)
for i in range(2):
stmt = s1.subquery().select().scalar_subquery()
self.assert_compile(
stmt,
"(SELECT anon_1.col1 FROM "
"(SELECT table1.col1 AS col1 FROM table1) AS anon_1)",
)
def test_type_coerce_preserve_subq(self):
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
stmt = select(type_coerce(column("x"), MyType).label("foo"))
subq = stmt.subquery()
stmt2 = subq.select()
subq2 = stmt2.subquery()
assert isinstance(stmt._raw_columns[0].type, MyType)
assert isinstance(subq.c.foo.type, MyType)
assert isinstance(stmt2.selected_columns.foo.type, MyType)
assert isinstance(subq2.c.foo.type, MyType)
def test_type_coerce_selfgroup(self):
no_group = column("a") // type_coerce(column("x"), Integer)
group = column("b") // type_coerce(column("y") * column("w"), Integer)
self.assert_compile(no_group, "a / x")
self.assert_compile(group, "b / (y * w)")
def test_subquery_on_table(self):
sel = (
select(table1, table2)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
assert sel.corresponding_column(table1.c.col1) is sel.c.table1_col1
assert (
sel.corresponding_column(table1.c.col1, require_embedded=True)
is sel.c.table1_col1
)
assert table1.corresponding_column(sel.c.table1_col1) is table1.c.col1
assert (
table1.corresponding_column(
sel.c.table1_col1, require_embedded=True
)
is None
)
def test_join_against_join(self):
j = outerjoin(table1, table2, table1.c.col1 == table2.c.col2)
jj = (
select(table1.c.col1.label("bar_col1"))
.select_from(j)
.alias(name="foo")
)
jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
assert jjj.corresponding_column(jjj.c.table1_col1) is jjj.c.table1_col1
j2 = jjj._anonymous_fromclause("foo")
assert j2.corresponding_column(jjj.c.table1_col1) is j2.c.table1_col1
assert jjj.corresponding_column(jj.c.bar_col1) is jj.c.bar_col1
def test_table_alias(self):
a = table1.alias("a")
j = join(a, table2)
criterion = a.c.col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_join_doesnt_derive_from_onclause(self):
# test issue #4621. the hide froms from the join comes from
# Join._from_obj(), which should not include tables in the ON clause
t1 = table("t1", column("a"))
t2 = table("t2", column("b"))
t3 = table("t3", column("c"))
t4 = table("t4", column("d"))
j = t1.join(t2, onclause=t1.c.a == t3.c.c)
j2 = t4.join(j, onclause=t4.c.d == t2.c.b)
stmt = select(t1, t2, t3, t4).select_from(j2)
self.assert_compile(
stmt,
"SELECT t1.a, t2.b, t3.c, t4.d FROM "
"t4 JOIN (t1 JOIN t2 ON t1.a = t3.c) ON t4.d = t2.b, t3",
)
stmt = select(t1).select_from(t3).select_from(j2)
self.assert_compile(
stmt,
"SELECT t1.a FROM t3, t4 JOIN (t1 JOIN t2 ON t1.a = t3.c) "
"ON t4.d = t2.b",
)
@testing.fails("not supported with rework, need a new approach")
def test_alias_handles_column_context(self):
# not quite a use case yet but this is expected to become
# prominent w/ PostgreSQL's tuple functions
stmt = select(table1.c.col1, table1.c.col2)
a = stmt.alias("a")
# TODO: this case is crazy, sending SELECT or FROMCLAUSE has to
# be figured out - is it a scalar row query? what kinds of
# statements go into functions in PG. seems likely select statement,
# but not alias, subquery or other FROM object
self.assert_compile(
select(func.foo(a)),
"SELECT foo(SELECT table1.col1, table1.col2 FROM table1) "
"AS foo_1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2 FROM table1) "
"AS a",
)
def test_union_correspondence(self):
# tests that we can correspond a column in a Select statement
# with a certain Table, against a column in a Union where one of
# its underlying Selects matches to that same Table
u = select(
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
).union(
select(
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
)
)
s1 = table1.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
s2 = table2.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
assert (
u.corresponding_column(s1.selected_columns.table1_col2)
is u.selected_columns.col2
)
# right now, the "selected_columns" of a union are those of the
# first selectable. so without using a subquery that represents
# all the SELECTs in the union, we can't do corresponding column
# like this. perhaps compoundselect shouldn't even implement
# .corresponding_column directly
assert (
u.corresponding_column(s2.selected_columns.table2_col2) is None
) # really? u.selected_columns.col2
usub = u.subquery()
assert (
usub.corresponding_column(s1.selected_columns.table1_col2)
is usub.c.col2
)
assert (
usub.corresponding_column(s2.selected_columns.table2_col2)
is usub.c.col2
)
s1sub = s1.subquery()
s2sub = s2.subquery()
assert usub.corresponding_column(s1sub.c.table1_col2) is usub.c.col2
assert usub.corresponding_column(s2sub.c.table2_col2) is usub.c.col2
def test_union_precedence(self):
# conflicting column correspondence should be resolved based on
# the order of the select()s in the union
s1 = select(table1.c.col1, table1.c.col2)
s2 = select(table1.c.col2, table1.c.col1)
s3 = select(table1.c.col3, table1.c.colx)
s4 = select(table1.c.colx, table1.c.col3)
u1 = union(s1, s2).subquery()
assert u1.corresponding_column(table1.c.col1) is u1.c.col1
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
u1 = union(s1, s2, s3, s4).subquery()
assert u1.corresponding_column(table1.c.col1) is u1.c.col1
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
assert u1.corresponding_column(table1.c.colx) is u1.c.col2
assert u1.corresponding_column(table1.c.col3) is u1.c.col1
def test_proxy_set_pollution(self):
s1 = select(table1.c.col1, table1.c.col2)
s2 = select(table1.c.col2, table1.c.col1)
for c in s1.selected_columns:
c.proxy_set
for c in s2.selected_columns:
c.proxy_set
u1 = union(s1, s2).subquery()
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
def test_singular_union(self):
u = union(
select(table1.c.col1, table1.c.col2, table1.c.col3),
select(table1.c.col1, table1.c.col2, table1.c.col3),
)
u = union(select(table1.c.col1, table1.c.col2, table1.c.col3))
assert u.selected_columns.col1 is not None
assert u.selected_columns.col2 is not None
assert u.selected_columns.col3 is not None
def test_alias_union(self):
# same as testunion, except its an alias of the union
u = (
select(
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
)
.union(
select(
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
)
)
.alias("analias")
)
s1 = (
table1.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
s2 = (
table2.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
assert u.corresponding_column(s2.c.table2_coly) is u.c.coly
assert s2.corresponding_column(u.c.coly) is s2.c.table2_coly
def test_union_of_alias(self):
s1 = select(table1.c.col1, table1.c.col2)
s2 = select(table1.c.col1, table1.c.col2).alias()
# previously this worked
assert_raises_message(
exc.ArgumentError,
"SELECT construct for inclusion in a UNION or "
"other set construct expected",
union,
s1,
s2,
)
def test_union_of_text(self):
s1 = select(table1.c.col1, table1.c.col2)
s2 = text("select col1, col2 from foo").columns(
column("col1"), column("col2")
)
u1 = union(s1, s2).subquery()
assert u1.corresponding_column(s1.selected_columns.col1) is u1.c.col1
assert u1.corresponding_column(s2.selected_columns.col1) is u1.c.col1
u2 = union(s2, s1).subquery()
assert u2.corresponding_column(s1.selected_columns.col1) is u2.c.col1
assert u2.corresponding_column(s2.selected_columns.col1) is u2.c.col1
def test_union_alias_misc(self):
s1 = select(table1.c.col1, table1.c.col2)
s2 = select(table1.c.col2, table1.c.col1)
u1 = union(s1, s2).subquery()
assert u1.corresponding_column(table1.c.col2) is u1.c.col2
metadata = MetaData()
table1_new = Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
Column("col3", Integer),
Column("colx", Integer),
)
# table1_new = table1
s1 = select(table1_new.c.col1, table1_new.c.col2)
s2 = select(table1_new.c.col2, table1_new.c.col1)
u1 = union(s1, s2).subquery()
# TODO: failing due to proxy_set not correct
assert u1.corresponding_column(table1_new.c.col2) is u1.c.col2
def test_unnamed_exprs_keys(self):
s1 = select(
table1.c.col1 == 5,
table1.c.col1 == 10,
func.count(table1.c.col1),
literal_column("x"),
)
# the reason we return "_no_label" is because we dont have a system
# right now that is guaranteed to use the identical label in
# selected_columns as will be used when we compile the statement, and
# this includes the creation of _result_map right now which gets loaded
# with lots of unprocessed anon symbols for these kinds of cases,
# and we don't have a fully comprehensive approach for this to always
# do the right thing; as it is *vastly* simpler for the user to please
# use a label(), "_no_label" is meant to encourage this rather than
# relying on a system that we don't fully have on this end.
eq_(s1.subquery().c.keys(), ["_no_label", "_no_label_1", "count", "x"])
self.assert_compile(
s1,
"SELECT table1.col1 = :col1_1 AS anon_1, "
"table1.col1 = :col1_2 AS anon_2, count(table1.col1) AS count_1, "
"x FROM table1",
)
eq_(
s1.selected_columns.keys(),
["_no_label", "_no_label_1", "count", "x"],
)
eq_(
select(s1.subquery()).selected_columns.keys(),
["_no_label", "_no_label_1", "_no_label_2", "x"],
)
self.assert_compile(
select(s1.subquery()),
"SELECT anon_2.anon_1, anon_2.anon_3, anon_2.count_1, anon_2.x "
"FROM (SELECT table1.col1 = :col1_1 AS anon_1, "
"table1.col1 = :col1_2 AS anon_3, "
"count(table1.col1) AS count_1, x FROM table1) AS anon_2",
)
def test_union_alias_dupe_keys(self):
s1 = select(table1.c.col1, table1.c.col2, table2.c.col1)
s2 = select(table2.c.col1, table2.c.col2, table2.c.col3)
u1 = union(s1, s2).subquery()
assert (
u1.corresponding_column(s1.selected_columns._all_columns[0])
is u1.c._all_columns[0]
)
# col1 is taken by the first "col1" in the list
assert u1.c.col1 is u1.c._all_columns[0]
# table2.c.col1 is in two positions in this union, so...currently
# it is the replaced one at position 2.
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[2]
# this is table2.c.col1, which in the first selectable is in position 2
assert u1.corresponding_column(s2.selected_columns.col1) is u1.c[2]
# same
assert u1.corresponding_column(s2.subquery().c.col1) is u1.c[2]
# col2 is working OK
assert u1.corresponding_column(s1.selected_columns.col2) is u1.c.col2
assert (
u1.corresponding_column(s1.selected_columns.col2)
is u1.c._all_columns[1]
)
assert u1.corresponding_column(s2.selected_columns.col2) is u1.c.col2
assert (
u1.corresponding_column(s2.selected_columns.col2)
is u1.c._all_columns[1]
)
assert u1.corresponding_column(s2.subquery().c.col2) is u1.c.col2
# col3 is also "correct"
assert u1.corresponding_column(s2.selected_columns.col3) is u1.c[2]
assert u1.corresponding_column(table1.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(table1.c.col2) is u1.c._all_columns[1]
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[2]
assert u1.corresponding_column(table2.c.col2) is u1.c._all_columns[1]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
def test_union_alias_dupe_keys_disambiguates_in_subq_compile_one(self):
s1 = select(table1.c.col1, table1.c.col2, table2.c.col1).limit(1)
s2 = select(table2.c.col1, table2.c.col2, table2.c.col3).limit(1)
u1 = union(s1, s2).subquery()
eq_(u1.c.keys(), ["col1", "col2", "col1_1"])
stmt = select(u1)
eq_(stmt.selected_columns.keys(), ["col1", "col2", "col1_1"])
# the union() sets a new labeling form in the first SELECT
self.assert_compile(
stmt,
"SELECT anon_1.col1, anon_1.col2, anon_1.col1_1 FROM "
"((SELECT table1.col1 AS col1, table1.col2 AS col2, table2.col1 "
"AS col1_1 "
"FROM table1, table2 LIMIT :param_1) UNION "
"(SELECT table2.col1 AS col1, table2.col2 AS col2, "
"table2.col3 AS col3 FROM table2 "
"LIMIT :param_2)) AS anon_1",
)
def test_union_alias_dupe_keys_disambiguates_in_subq_compile_two(self):
a = table("a", column("id"))
b = table("b", column("id"), column("aid"))
d = table("d", column("id"), column("aid"))
u1 = union(
a.join(b, a.c.id == b.c.aid)
.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
a.join(d, a.c.id == d.c.aid)
.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
).alias()
eq_(u1.c.keys(), ["a_id", "b_id", "b_aid"])
stmt = select(u1)
eq_(stmt.selected_columns.keys(), ["a_id", "b_id", "b_aid"])
# the union() detects that the first SELECT already has a labeling
# style and uses that
self.assert_compile(
stmt,
"SELECT anon_1.a_id, anon_1.b_id, anon_1.b_aid FROM "
"(SELECT a.id AS a_id, b.id AS b_id, b.aid AS b_aid "
"FROM a JOIN b ON a.id = b.aid "
"UNION SELECT a.id AS a_id, d.id AS d_id, d.aid AS d_aid "
"FROM a JOIN d ON a.id = d.aid) AS anon_1",
)
def test_union_alias_dupe_keys_grouped(self):
s1 = select(table1.c.col1, table1.c.col2, table2.c.col1).limit(1)
s2 = select(table2.c.col1, table2.c.col2, table2.c.col3).limit(1)
u1 = union(s1, s2).subquery()
assert (
u1.corresponding_column(s1.selected_columns._all_columns[0])
is u1.c._all_columns[0]
)
# col1 is taken by the first "col1" in the list
assert u1.c.col1 is u1.c._all_columns[0]
# table2.c.col1 is in two positions in this union, so...currently
# it is the replaced one at position 2.
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[2]
# this is table2.c.col1, which in the first selectable is in position 2
assert u1.corresponding_column(s2.selected_columns.col1) is u1.c[2]
# same
assert u1.corresponding_column(s2.subquery().c.col1) is u1.c[2]
# col2 is working OK
assert u1.corresponding_column(s1.selected_columns.col2) is u1.c.col2
assert (
u1.corresponding_column(s1.selected_columns.col2)
is u1.c._all_columns[1]
)
assert u1.corresponding_column(s2.selected_columns.col2) is u1.c.col2
assert (
u1.corresponding_column(s2.selected_columns.col2)
is u1.c._all_columns[1]
)
assert u1.corresponding_column(s2.subquery().c.col2) is u1.c.col2
# col3 is also "correct"
assert u1.corresponding_column(s2.selected_columns.col3) is u1.c[2]
assert u1.corresponding_column(table1.c.col1) is u1.c._all_columns[0]
assert u1.corresponding_column(table1.c.col2) is u1.c._all_columns[1]
assert u1.corresponding_column(table2.c.col1) is u1.c._all_columns[2]
assert u1.corresponding_column(table2.c.col2) is u1.c._all_columns[1]
assert u1.corresponding_column(table2.c.col3) is u1.c._all_columns[2]
def test_select_union(self):
# like testaliasunion, but built off a Select of the union.
u = (
select(
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
)
.union(
select(
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
)
)
.alias("analias")
)
s = select(u).subquery()
s1 = (
table1.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
s2 = (
table2.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
assert s.corresponding_column(s1.c.table1_col2) is s.c.col2
assert s.corresponding_column(s2.c.table2_col2) is s.c.col2
def test_union_against_join(self):
# same as testunion, except it's an alias of the union
u = (
select(
table1.c.col1,
table1.c.col2,
table1.c.col3,
table1.c.colx,
null().label("coly"),
)
.union(
select(
table2.c.col1,
table2.c.col2,
table2.c.col3,
null().label("colx"),
table2.c.coly,
)
)
.alias("analias")
)
j1 = table1.join(table2)
assert u.corresponding_column(j1.c.table1_colx) is u.c.colx
assert j1.corresponding_column(u.c.colx) is j1.c.table1_colx
def test_join(self):
a = join(table1, table2)
print(str(a.select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)))
b = table2.alias("b")
j = join(a, b)
print(str(j))
criterion = a.c.table1_col1 == b.c.col2
self.assert_(criterion.compare(j.onclause))
def test_select_subquery_join(self):
a = table1.select().alias("a")
j = join(a, table2)
criterion = a.c.col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_subquery_labels_join(self):
a = (
table1.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
j = join(a, table2)
criterion = a.c.table1_col1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_scalar_cloned_comparator(self):
sel = select(table1.c.col1).scalar_subquery()
sel == table1.c.col1
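# the comparison above is evaluated (and discarded) so that a comparator
# is generated on `sel` before it is cloned; the clone below should then
# produce expressions bound to itself rather than to the original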
sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
expr2 = sel2 == table1.c.col1
is_(expr2.left, sel2)
def test_column_labels(self):
a = select(
table1.c.col1.label("acol1"),
table1.c.col2.label("acol2"),
table1.c.col3.label("acol3"),
).subquery()
j = join(a, table2)
criterion = a.c.acol1 == table2.c.col2
self.assert_(criterion.compare(j.onclause))
def test_labeled_select_corresponding(self):
l1 = select(func.max(table1.c.col1)).label("foo")
s = select(l1)
eq_(s.corresponding_column(l1), s.selected_columns.foo)
s = select(table1.c.col1, l1)
eq_(s.corresponding_column(l1), s.selected_columns.foo)
def test_labeled_subquery_corresponding(self):
l1 = select(func.max(table1.c.col1)).label("foo")
s = select(l1).subquery()
eq_(s.corresponding_column(l1), s.c.foo)
s = select(table1.c.col1, l1).subquery()
eq_(s.corresponding_column(l1), s.c.foo)
def test_select_alias_labels(self):
a = (
table2.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("a")
)
j = join(a, table1)
criterion = table1.c.col1 == a.c.table2_col2
self.assert_(criterion.compare(j.onclause))
def test_table_joined_to_select_of_table(self):
metadata = MetaData()
a = Table("a", metadata, Column("id", Integer, primary_key=True))
j2 = select(a.c.id.label("aid")).alias("bar")
j3 = a.join(j2, j2.c.aid == a.c.id)
j4 = select(j3).alias("foo")
assert j4.corresponding_column(j2.c.aid) is j4.c.aid
assert j4.corresponding_column(a.c.id) is j4.c.id
@testing.combinations(True, False)
def test_two_metadata_join_raises(self, include_a_joining_table):
"""test case from 2008 enhanced as of #8101, more specific failure
modes for non-resolvable FKs
"""
m = MetaData()
m2 = MetaData()
t1 = Table("t1", m, Column("id", Integer), Column("id2", Integer))
if include_a_joining_table:
t2 = Table("t2", m, Column("id", Integer, ForeignKey("t1.id")))
t3 = Table("t3", m2, Column("id", Integer, ForeignKey("t1.id2")))
with expect_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't3.id'",
):
t3.join(t1)
if include_a_joining_table:
s = (
select(t2, t3)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
else:
s = (
select(t3)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
with expect_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 'anon_1.t3_id' could not "
"find table 't1' with which to generate a foreign key to target "
"column 'id2'",
):
select(s.join(t1)),
# manual join is OK.  using select().join() here also exercises the
# fact that join() does not need to resolve FKs when we provide the
# ON clause
if include_a_joining_table:
self.assert_compile(
select(s).join(
t1, and_(s.c.t2_id == t1.c.id, s.c.t3_id == t1.c.id)
),
"SELECT anon_1.t2_id, anon_1.t3_id FROM (SELECT "
"t2.id AS t2_id, t3.id AS t3_id FROM t2, t3) AS anon_1 "
"JOIN t1 ON anon_1.t2_id = t1.id AND anon_1.t3_id = t1.id",
)
else:
self.assert_compile(
select(s).join(t1, s.c.t3_id == t1.c.id),
"SELECT anon_1.t3_id FROM (SELECT t3.id AS t3_id FROM t3) "
"AS anon_1 JOIN t1 ON anon_1.t3_id = t1.id",
)
def test_multi_label_chain_naming_col(self):
# See [ticket:2167] for this one.
l1 = table1.c.col1.label("a")
l2 = select(l1).label("b")
s = select(l2).subquery()
assert s.c.b is not None
self.assert_compile(
s.select(),
"SELECT anon_1.b FROM "
"(SELECT (SELECT table1.col1 AS a FROM table1) AS b) AS anon_1",
)
s2 = select(s.element.label("c")).subquery()
self.assert_compile(
s2.select(),
"SELECT anon_1.c FROM (SELECT (SELECT ("
"SELECT table1.col1 AS a FROM table1) AS b) AS c) AS anon_1",
)
def test_self_referential_select_raises(self):
t = table("t", column("x"))
# this issue is much less likely as subquery() applies a labeling
# style to the select, eliminating the self-referential call unless
# the select already had labeling applied
s = select(t).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
with testing.expect_deprecated("The SelectBase.c"):
s.where.non_generative(s, s.c.t_x > 5)
assert_raises_message(
exc.InvalidRequestError,
r"select\(\) construct refers to itself as a FROM",
s.compile,
)
def test_unusual_column_elements_text(self):
"""test that .c excludes text()."""
s = select(table1.c.col1, text("foo")).subquery()
eq_(list(s.c), [s.c.col1])
def test_unusual_column_elements_clauselist(self):
"""Test that raw ClauseList is expanded into .c."""
from sqlalchemy.sql.expression import ClauseList
s = select(
table1.c.col1, ClauseList(table1.c.col2, table1.c.col3)
).subquery()
eq_(list(s.c), [s.c.col1, s.c.col2, s.c.col3])
def test_unusual_column_elements_boolean_clauselist(self):
"""test that BooleanClauseList is placed as single element in .c."""
c2 = and_(table1.c.col2 == 5, table1.c.col3 == 4)
s = select(table1.c.col1, c2).subquery()
eq_(list(s.c), [s.c.col1, s.corresponding_column(c2)])
def test_from_list_deferred_constructor(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
select(c1)
t = Table("t", MetaData(), c1, c2)
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
self.assert_compile(select(c1), "SELECT t.c1 FROM t")
self.assert_compile(select(c2), "SELECT t.c2 FROM t")
def test_from_list_deferred_whereclause(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
select(c1).where(c1 == 5)
t = Table("t", MetaData(), c1, c2)
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
self.assert_compile(select(c1), "SELECT t.c1 FROM t")
self.assert_compile(select(c2), "SELECT t.c2 FROM t")
def test_from_list_deferred_fromlist(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer))
c1 = Column("c1", Integer)
select(c1).where(c1 == 5).select_from(t1)
t2 = Table("t2", MetaData(), c1)
eq_(c1._from_objects, [t2])
self.assert_compile(select(c1), "SELECT t2.c1 FROM t2")
def test_from_list_deferred_cloning(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
s = select(c1)
s2 = select(c2)
s3 = sql_util.ClauseAdapter(s).traverse(s2)
Table("t", MetaData(), c1, c2)
self.assert_compile(s3, "SELECT t.c2 FROM t")
def test_from_list_with_columns(self):
table1 = table("t1", column("a"))
table2 = table("t2", column("b"))
s1 = select(table1.c.a, table2.c.b)
self.assert_compile(s1, "SELECT t1.a, t2.b FROM t1, t2")
s2 = s1.with_only_columns(table2.c.b)
self.assert_compile(s2, "SELECT t2.b FROM t2")
s3 = sql_util.ClauseAdapter(table1).traverse(s1)
self.assert_compile(s3, "SELECT t1.a, t2.b FROM t1, t2")
s4 = s3.with_only_columns(table2.c.b)
self.assert_compile(s4, "SELECT t2.b FROM t2")
def test_from_list_against_existing_one(self):
c1 = Column("c1", Integer)
s = select(c1)
# force a compile.
self.assert_compile(s, "SELECT c1")
Table("t", MetaData(), c1)
self.assert_compile(s, "SELECT t.c1 FROM t")
def test_from_list_against_existing_two(self):
c1 = Column("c1", Integer)
c2 = Column("c2", Integer)
s = select(c1)
# force a compile.
eq_(str(s), "SELECT c1")
t = Table("t", MetaData(), c1, c2)
eq_(c1._from_objects, [t])
eq_(c2._from_objects, [t])
self.assert_compile(s, "SELECT t.c1 FROM t")
self.assert_compile(select(c1), "SELECT t.c1 FROM t")
self.assert_compile(select(c2), "SELECT t.c2 FROM t")
def test_label_gen_resets_on_table(self):
c1 = Column("c1", Integer)
eq_(c1._label, "c1")
Table("t1", MetaData(), c1)
eq_(c1._label, "t1_c1")
def test_no_alias_construct(self):
a = table("a", column("x"))
assert_raises_message(
NotImplementedError,
"The Alias class is not intended to be constructed directly. "
r"Please use the alias\(\) standalone function",
Alias,
a,
"foo",
)
def test_whereclause_adapted(self):
table1 = table("t1", column("a"))
s1 = select(table1).subquery()
s2 = select(s1).where(s1.c.a == 5)
assert s2._whereclause.left.table is s1
ta = select(table1).subquery()
s3 = sql_util.ClauseAdapter(ta).traverse(s2)
froms = list(s3._iterate_from_elements())
assert s1 not in froms
# these are new assumptions with the newer approach that
# actively swaps out whereclause and others
assert s3._whereclause.left.table is not s1
assert s3._whereclause.left.table in froms
def test_table_schema(self):
t = table("foo")
eq_(t.name, "foo")
eq_(t.fullname, "foo")
t = table("foo", schema="bar")
eq_(t.name, "foo")
eq_(t.fullname, "bar.foo")
class RefreshForNewColTest(fixtures.TestBase):
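# these tests exercise _refresh_for_new_column(), an internal hook that
# updates a selectable's column collections in place after a new column
# is appended to one of its underlying tables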
def test_join_uninit(self):
a = table("a", column("x"))
b = table("b", column("y"))
j = a.join(b, a.c.x == b.c.y)
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_join_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
j = a.join(b, a.c.x == b.c.y)
j.c
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_join_samename_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
j = a.join(b, a.c.x == b.c.y)
j.c
q = column("x")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_x is q
def test_select_samename_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
s = select(a, b).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
s.selected_columns
q = column("x")
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.selected_columns.b_x.proxy_set
def test_alias_alias_samename_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
s1 = (
select(a, b)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias()
)
s2 = s1.alias()
s1.c
s2.c
q = column("x")
b.append_column(q)
assert "_columns" in s2.__dict__
s2._refresh_for_new_column(q)
assert "_columns" not in s2.__dict__
is_(s1.corresponding_column(s2.c.b_x), s1.c.b_x)
def test_aliased_select_samename_uninit(self):
a = table("a", column("x"))
b = table("b", column("y"))
s = (
select(a, b)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias()
)
q = column("x")
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_samename_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
s = (
select(a, b)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias()
)
s.c
q = column("x")
b.append_column(q)
s._refresh_for_new_column(q)
assert q in s.c.b_x.proxy_set
def test_aliased_select_irrelevant(self):
a = table("a", column("x"))
b = table("b", column("y"))
c = table("c", column("z"))
s = (
select(a, b)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias()
)
s.c
q = column("x")
c.append_column(q)
s._refresh_for_new_column(q)
assert "c_x" not in s.c
def test_aliased_select_no_cols_clause(self):
a = table("a", column("x"))
s = (
select(a.c.x)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias()
)
s.c
q = column("q")
a.append_column(q)
s._refresh_for_new_column(q)
assert "a_q" not in s.c
def test_union_uninit(self):
a = table("a", column("x"))
s1 = select(a)
s2 = select(a)
s3 = s1.union(s2)
q = column("q")
a.append_column(q)
s3._refresh_for_new_column(q)
assert a.c.q in s3.selected_columns.q.proxy_set
def test_union_init(self):
a = table("a", column("x"))
s1 = select(a)
s2 = select(a)
s3 = s1.union(s2)
s3.selected_columns
q = column("q")
a.append_column(q)
s3._refresh_for_new_column(q)
assert a.c.q in s3.selected_columns.q.proxy_set
def test_nested_join_uninit(self):
a = table("a", column("x"))
b = table("b", column("y"))
c = table("c", column("z"))
j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_nested_join_init(self):
a = table("a", column("x"))
b = table("b", column("y"))
c = table("c", column("z"))
j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
j.c
q = column("q")
b.append_column(q)
j._refresh_for_new_column(q)
assert j.c.b_q is q
def test_fk_table(self):
m = MetaData()
fk = ForeignKey("x.id")
Table("x", m, Column("id", Integer))
a = Table("a", m, Column("x", Integer, fk))
a.c
q = Column("q", Integer)
a.append_column(q)
a._refresh_for_new_column(q)
eq_(a.foreign_keys, {fk})
fk2 = ForeignKey("g.id")
p = Column("p", Integer, fk2)
a.append_column(p)
a._refresh_for_new_column(p)
eq_(a.foreign_keys, {fk, fk2})
def test_fk_join(self):
m = MetaData()
fk = ForeignKey("x.id")
Table("x", m, Column("id", Integer))
a = Table("a", m, Column("x", Integer, fk))
b = Table("b", m, Column("y", Integer))
j = a.join(b, a.c.x == b.c.y)
j.c
q = Column("q", Integer)
b.append_column(q)
j._refresh_for_new_column(q)
eq_(j.foreign_keys, {fk})
fk2 = ForeignKey("g.id")
p = Column("p", Integer, fk2)
b.append_column(p)
j._refresh_for_new_column(p)
eq_(j.foreign_keys, {fk, fk2})
class AnonLabelTest(fixtures.TestBase):
"""Test behaviors fixed by [ticket:2168]."""
def test_anon_labels_named_column(self):
c1 = column("x")
assert c1.label(None) is not c1
eq_(str(select(c1.label(None))), "SELECT x AS x_1")
def test_anon_labels_literal_column(self):
c1 = literal_column("x")
assert c1.label(None) is not c1
eq_(str(select(c1.label(None))), "SELECT x AS x_1")
def test_anon_labels_func(self):
c1 = func.count("*")
assert c1.label(None) is not c1
eq_(str(select(c1)), "SELECT count(:count_2) AS count_1")
select(c1).compile()
eq_(str(select(c1.label(None))), "SELECT count(:count_2) AS count_1")
def test_named_labels_named_column(self):
c1 = column("x")
eq_(str(select(c1.label("y"))), "SELECT x AS y")
def test_named_labels_literal_column(self):
c1 = literal_column("x")
eq_(str(select(c1.label("y"))), "SELECT x AS y")
class JoinAnonymizingTest(fixtures.TestBase, AssertsCompiledSQL):
"""test anonymous_fromclause for aliases.
In 1.4 this function is only for ORM internal use. The public version
join.alias() is deprecated.
"""
__dialect__ = "default"
def test_flat_ok_on_non_join(self):
a = table("a", column("a"))
s = a.select()
self.assert_compile(
s.alias(flat=True).select(),
"SELECT anon_1.a FROM (SELECT a.a AS a FROM a) AS anon_1",
)
def test_join_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b",
)
def test_join_standalone_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b",
)
def test_join_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b",
)
def test_join_standalone_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b",
)
def test_composed_join_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
d = table("d", column("d"))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
# note in 1.4 the flat=True flag now descends into the whole join,
# as it should
self.assert_compile(
j1.join(j2, b.c.b == c.c.c)._anonymous_fromclause(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b JOIN "
"(c AS c_1 JOIN d AS d_1 ON c_1.c = d_1.d) "
"ON b_1.b = c_1.c",
)
def test_composed_join_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
d = table("d", column("d"))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
self.assert_compile(
select(j1.join(j2, b.c.b == c.c.c)._anonymous_fromclause()),
"SELECT anon_1.a_a, anon_1.b_b, anon_1.c_c, anon_1.d_d "
"FROM (SELECT a.a AS a_a, b.b AS b_b, c.c AS c_c, d.d AS d_d "
"FROM a JOIN b ON a.a = b.b "
"JOIN (c JOIN d ON c.c = d.d) ON b.b = c.c) AS anon_1",
)
class JoinConditionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
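# sql_util.join_condition() derives an ON clause between two selectables
# based on their foreign key relationships; these tests cover unambiguous,
# ambiguous, and error-raising cases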
def test_join_condition_one(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t4 = Table(
"t4", m, Column("id", Integer), Column("t2id", ForeignKey("t2.id"))
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
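# each tuple below is (left, right, a_subset, expected ON clause)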
for left, right, a_subset, expected in [
(t1, t2, None, t1.c.id == t2.c.t1id),
(t1t2, t3, t2, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t3, t1.c.id == t3.c.t1id),
(t2t3, t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t3, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t1, t2t3.c.t2_id == t4.c.t2id),
(t1t2, t2t3, t2, t1t2.c.t2_id == t2t3.c.t3_t2id),
]:
assert expected.compare(
sql_util.join_condition(left, right, a_subset=a_subset)
)
def test_join_condition_two(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t4 = Table(
"t4", m, Column("id", Integer), Column("t2id", ForeignKey("t2.id"))
)
t5 = Table(
"t5",
m,
Column("t1id1", ForeignKey("t1.id")),
Column("t1id2", ForeignKey("t1.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
# these are ambiguous, or have no joins
for left, right, a_subset in [
(t1t2, t3, None),
(t2t3, t1, None),
(t1, t4, None),
(t1t2, t2t3, None),
(t5, t1, None),
(
t5.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery(),
t1,
None,
),
]:
assert_raises(
exc.ArgumentError,
sql_util.join_condition,
left,
right,
a_subset=a_subset,
)
def test_join_condition_three(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t4 = Table(
"t4",
m,
Column("id", Integer),
Column("t2id", ForeignKey("t2.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
als = t2t3._anonymous_fromclause()
# test join()'s behavior, including natural join conditions
for left, right, expected in [
(t1, t2, t1.c.id == t2.c.t1id),
(t1t2, t3, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t1.c.id == t3.c.t1id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t1t2, als, t1t2.c.t2_id == als.c.t3_t2id),
]:
assert expected.compare(left.join(right).onclause)
def test_join_condition_four(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
# these are right-nested joins
j = t1t2.join(t2t3)
assert j.onclause.compare(t2.c.id == t3.c.t2id)
self.assert_compile(
j,
"t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(t2 JOIN t3 ON t2.id = t3.t2id) ON t2.id = t3.t2id",
)
def test_join_condition_five(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
st2t3 = (
t2t3.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
j = t1t2.join(st2t3)
assert j.onclause.compare(t2.c.id == st2t3.c.t3_t2id)
self.assert_compile(
j,
"t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(SELECT t2.id AS t2_id, t2.t1id AS t2_t1id, "
"t3.id AS t3_id, t3.t1id AS t3_t1id, t3.t2id AS t3_t2id "
"FROM t2 JOIN t3 ON t2.id = t3.t2id) AS anon_1 "
"ON t2.id = anon_1.t3_t2id",
)
def test_join_multiple_equiv_fks(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m,
Column("t1id", Integer, ForeignKey("t1.id"), ForeignKey("t1.id")),
)
assert sql_util.join_condition(t1, t2).compare(t1.c.id == t2.c.t1id)
def test_join_cond_no_such_unrelated_table(self):
m = MetaData()
# bounding the "good" column with two "bad" ones is so to
# try to get coverage to get the "continue" statements
# in the loop...
t1 = Table(
"t1",
m,
Column("y", Integer, ForeignKey("t22.id")),
Column("x", Integer, ForeignKey("t2.id")),
Column("q", Integer, ForeignKey("t22.id")),
)
t2 = Table("t2", m, Column("id", Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_unrelated_column(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, ForeignKey("t2.id")),
Column("y", Integer, ForeignKey("t3.q")),
)
t2 = Table("t2", m, Column("id", Integer))
Table("t3", m, Column("id", Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_unrelated_table_dont_compare_names(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("y", Integer, ForeignKey("t22.id")),
Column("x", Integer, ForeignKey("t2.id")),
Column("q", Integer, ForeignKey("t22.id")),
)
t2 = Table(
"t2",
m,
Column("id", Integer),
Column("t3id", ForeignKey("t3.id")),
Column("z", ForeignKey("t33.id")),
)
t3 = Table(
"t3", m, Column("id", Integer), Column("q", ForeignKey("t4.id"))
)
j1 = t1.join(t2)
assert sql_util.join_condition(j1, t3).compare(t2.c.t3id == t3.c.id)
def test_join_cond_no_such_unrelated_column_dont_compare_names(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, ForeignKey("t2.id")),
)
t2 = Table(
"t2",
m,
Column("id", Integer),
Column("t3id", ForeignKey("t3.id")),
Column("q", ForeignKey("t5.q")),
)
t3 = Table(
"t3", m, Column("id", Integer), Column("t4id", ForeignKey("t4.id"))
)
t4 = Table("t4", m, Column("id", Integer))
Table("t5", m, Column("id", Integer))
j1 = t1.join(t2)
j2 = t3.join(t4)
assert sql_util.join_condition(j1, j2).compare(t2.c.t3id == t3.c.id)
def test_join_cond_no_such_related_table(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table("t1", m1, Column("x", Integer, ForeignKey("t2.id")))
t2 = Table("t2", m2, Column("id", Integer))
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition,
t1,
t2,
)
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition,
t2,
t1,
)
def test_join_cond_no_such_related_column(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer, ForeignKey("t2.q")))
t2 = Table("t2", m, Column("id", Integer))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition,
t1,
t2,
)
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition,
t2,
t1,
)
class PrimaryKeyTest(fixtures.TestBase, AssertsExecutionResults):
def test_join_pk_collapse_implicit(self):
"""test that redundant columns in a join get 'collapsed' into a
minimal primary key, which is the root column along a chain of
foreign key relationships."""
meta = MetaData()
a = Table("a", meta, Column("id", Integer, primary_key=True))
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
)
c = Table(
"c",
meta,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
)
d = Table(
"d",
meta,
Column("id", Integer, ForeignKey("c.id"), primary_key=True),
)
assert c.c.id.references(b.c.id)
assert not d.c.id.references(a.c.id)
assert list(a.join(b).primary_key) == [a.c.id]
assert list(b.join(c).primary_key) == [b.c.id]
assert list(a.join(b).join(c).primary_key) == [a.c.id]
assert list(b.join(c).join(d).primary_key) == [b.c.id]
assert list(d.join(c).join(b).primary_key) == [b.c.id]
assert list(a.join(b).join(c).join(d).primary_key) == [a.c.id]
def test_join_pk_collapse_explicit(self):
"""test that redundant columns in a join get 'collapsed' into a
minimal primary key, which is the root column along a chain of
explicit join conditions."""
meta = MetaData()
a = Table(
"a",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("x", Integer),
)
c = Table(
"c",
meta,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
Column("x", Integer),
)
d = Table(
"d",
meta,
Column("id", Integer, ForeignKey("c.id"), primary_key=True),
Column("x", Integer),
)
print(list(a.join(b, a.c.x == b.c.id).primary_key))
assert list(a.join(b, a.c.x == b.c.id).primary_key) == [a.c.id]
assert list(b.join(c, b.c.x == c.c.id).primary_key) == [b.c.id]
assert list(a.join(b).join(c, c.c.id == b.c.x).primary_key) == [a.c.id]
assert list(b.join(c, c.c.x == b.c.id).join(d).primary_key) == [b.c.id]
assert list(b.join(c, c.c.id == b.c.x).join(d).primary_key) == [b.c.id]
assert list(
d.join(b, d.c.id == b.c.id).join(c, b.c.id == c.c.x).primary_key
) == [b.c.id]
assert list(
a.join(b).join(c, c.c.id == b.c.x).join(d).primary_key
) == [a.c.id]
assert list(
a.join(b, and_(a.c.id == b.c.id, a.c.x == b.c.id)).primary_key
) == [a.c.id]
def test_init_doesnt_blowitaway(self):
meta = MetaData()
a = Table(
"a",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("x", Integer),
)
j = a.join(b)
assert list(j.primary_key) == [a.c.id]
j.foreign_keys
assert list(j.primary_key) == [a.c.id]
def test_non_column_clause(self):
meta = MetaData()
a = Table(
"a",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
b = Table(
"b",
meta,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("x", Integer, primary_key=True),
)
j = a.join(b, and_(a.c.id == b.c.id, b.c.x == 5))
assert str(j) == "a JOIN b ON a.id = b.id AND b.x = :x_1", str(j)
assert list(j.primary_key) == [a.c.id, b.c.x]
def test_onclause_direction(self):
metadata = MetaData()
employee = Table(
"Employee",
metadata,
Column("name", String(100)),
Column("id", Integer, primary_key=True),
)
engineer = Table(
"Engineer",
metadata,
Column("id", Integer, ForeignKey("Employee.id"), primary_key=True),
)
eq_(
util.column_set(
employee.join(
engineer, employee.c.id == engineer.c.id
).primary_key
),
util.column_set([employee.c.id]),
)
eq_(
util.column_set(
employee.join(
engineer, engineer.c.id == employee.c.id
).primary_key
),
util.column_set([employee.c.id]),
)
class ReduceTest(fixtures.TestBase, AssertsExecutionResults):
def test_reduce(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("t1id", Integer, primary_key=True),
Column("t1data", String(30)),
)
t2 = Table(
"t2",
meta,
Column("t2id", Integer, ForeignKey("t1.t1id"), primary_key=True),
Column("t2data", String(30)),
)
t3 = Table(
"t3",
meta,
Column("t3id", Integer, ForeignKey("t2.t2id"), primary_key=True),
Column("t3data", String(30)),
)
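# t2.t2id and t3.t3id are foreign keys chaining back to t1.t1id, so
# reduce_columns() drops them, keeping only the referenced "root" column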
eq_(
util.column_set(
sql_util.reduce_columns(
[
t1.c.t1id,
t1.c.t1data,
t2.c.t2id,
t2.c.t2data,
t3.c.t3id,
t3.c.t3data,
]
)
),
util.column_set(
[t1.c.t1id, t1.c.t1data, t2.c.t2data, t3.c.t3data]
),
)
def test_reduce_selectable(self):
metadata = MetaData()
engineers = Table(
"engineers",
metadata,
Column("engineer_id", Integer, primary_key=True),
Column("engineer_name", String(50)),
)
managers = Table(
"managers",
metadata,
Column("manager_id", Integer, primary_key=True),
Column("manager_name", String(50)),
)
s = (
select(engineers, managers)
.where(engineers.c.engineer_name == managers.c.manager_name)
.subquery()
)
eq_(
util.column_set(sql_util.reduce_columns(list(s.c), s)),
util.column_set(
[s.c.engineer_id, s.c.engineer_name, s.c.manager_id]
),
)
def test_reduce_generation(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t2 = Table(
"t2",
m,
Column("z", Integer, ForeignKey("t1.x")),
Column("q", Integer),
)
s1 = select(t1, t2)
s2 = s1.reduce_columns(only_synonyms=False)
eq_(set(s2.selected_columns), {t1.c.x, t1.c.y, t2.c.q})
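# with the default only_synonyms=True, t2.c.z is retained because its
# name differs from the t1.c.x column it references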
s2 = s1.reduce_columns()
eq_(set(s2.selected_columns), {t1.c.x, t1.c.y, t2.c.z, t2.c.q})
def test_reduce_only_synonym_fk(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t2 = Table(
"t2",
m,
Column("x", Integer, ForeignKey("t1.x")),
Column("q", Integer, ForeignKey("t1.y")),
)
s1 = select(t1, t2)
s1 = s1.reduce_columns(only_synonyms=True)
eq_(
set(s1.selected_columns),
{
s1.selected_columns.x,
s1.selected_columns.y,
s1.selected_columns.q,
},
)
def test_reduce_only_synonym_lineage(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, primary_key=True),
Column("y", Integer),
Column("z", Integer),
)
# test that the first appearance in the columns clause
# wins - t1 is first, t1.c.x wins
s1 = select(t1).subquery()
s2 = select(t1, s1).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().selected_columns),
{t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z},
)
# reverse order, s1.c.x wins
s1 = select(t1).subquery()
s2 = select(s1, t1).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
eq_(
set(s2.reduce_columns().selected_columns),
{s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z},
)
def test_reduce_aliased_join(self):
metadata = MetaData()
people = Table(
"people",
metadata,
Column(
"person_id",
Integer,
normalize_sequence(
config, Sequence("person_id_seq", optional=True)
),
primary_key=True,
),
Column("name", String(50)),
Column("type", String(30)),
)
engineers = Table(
"engineers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("engineer_name", String(50)),
Column("primary_language", String(50)),
)
managers = Table(
"managers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("manager_name", String(50)),
)
pjoin = (
people.outerjoin(engineers)
.outerjoin(managers)
.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("pjoin")
)
eq_(
util.column_set(
sql_util.reduce_columns(
[
pjoin.c.people_person_id,
pjoin.c.engineers_person_id,
pjoin.c.managers_person_id,
]
)
),
util.column_set([pjoin.c.people_person_id]),
)
def test_reduce_aliased_union(self):
metadata = MetaData()
item_table = Table(
"item",
metadata,
Column(
"id", Integer, ForeignKey("base_item.id"), primary_key=True
),
Column("dummy", Integer, default=0),
)
base_item_table = Table(
"base_item",
metadata,
Column("id", Integer, primary_key=True),
Column("child_name", String(255), default=None),
)
from sqlalchemy.orm.util import polymorphic_union
item_join = polymorphic_union(
{
"BaseItem": base_item_table.select()
.where(base_item_table.c.child_name == "BaseItem")
.subquery(),
"Item": base_item_table.join(item_table),
},
None,
"item_join",
)
eq_(
util.column_set(
sql_util.reduce_columns(
[item_join.c.id, item_join.c.dummy, item_join.c.child_name]
)
),
util.column_set(
[item_join.c.id, item_join.c.dummy, item_join.c.child_name]
),
)
def test_reduce_aliased_union_2(self):
metadata = MetaData()
page_table = Table(
"page", metadata, Column("id", Integer, primary_key=True)
)
magazine_page_table = Table(
"magazine_page",
metadata,
Column(
"page_id", Integer, ForeignKey("page.id"), primary_key=True
),
)
classified_page_table = Table(
"classified_page",
metadata,
Column(
"magazine_page_id",
Integer,
ForeignKey("magazine_page.page_id"),
primary_key=True,
),
)
# this is essentially the union formed by the ORM's
# polymorphic_union function. we define two versions with
# different ordering of selects.
#
# the first selectable has the "real" column
# classified_page.magazine_page_id
pjoin = union(
select(
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id,
).select_from(
page_table.join(magazine_page_table).join(
classified_page_table
)
),
select(
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label("magazine_page_id"),
).select_from(page_table.join(magazine_page_table)),
).alias("pjoin")
eq_(
util.column_set(
sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id]
)
),
util.column_set([pjoin.c.id]),
)
# the first selectable has a CAST, which is a placeholder for
# classified_page.magazine_page_id in the second selectable.
# reduce_columns needs to take into account all foreign keys
# derived from pjoin.c.magazine_page_id. the UNION construct
# currently makes the external column look like that of the
# first selectable only.
pjoin = union(
select(
page_table.c.id,
magazine_page_table.c.page_id,
cast(null(), Integer).label("magazine_page_id"),
).select_from(page_table.join(magazine_page_table)),
select(
page_table.c.id,
magazine_page_table.c.page_id,
classified_page_table.c.magazine_page_id,
).select_from(
page_table.join(magazine_page_table).join(
classified_page_table
)
),
).alias("pjoin")
eq_(
util.column_set(
sql_util.reduce_columns(
[pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id]
)
),
util.column_set([pjoin.c.id]),
)
class DerivedTest(fixtures.TestBase, AssertsExecutionResults):
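# is_derived_from() should return True when a selectable (alias, select,
# join, etc.) ultimately derives from the given table, and False otherwise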
def test_table(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.is_derived_from(t1)
assert not t2.is_derived_from(t1)
def test_alias(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.alias().is_derived_from(t1)
assert not t2.alias().is_derived_from(t1)
assert not t1.is_derived_from(t1.alias())
assert not t1.is_derived_from(t2.alias())
def test_select(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.select().is_derived_from(t1)
assert not t2.select().is_derived_from(t1)
assert select(t1, t2).is_derived_from(t1)
assert t1.select().alias("foo").is_derived_from(t1)
assert select(t1, t2).alias("foo").is_derived_from(t1)
assert not t2.select().alias("foo").is_derived_from(t1)
def test_join(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t3 = Table(
"t3",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
j1 = t1.join(t2, t1.c.c1 == t2.c.c1)
assert j1.is_derived_from(j1)
assert j1.is_derived_from(t1)
assert j1._annotate({"foo": "bar"}).is_derived_from(j1)
assert not j1.is_derived_from(t3)
class AnnotationsTest(fixtures.TestBase):
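# these tests exercise the _annotate() / _deep_annotate() / _deep_deannotate()
# internals, which attach ad-hoc key/value annotations to copies of
# expression elements without affecting how they compile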
def test_hashing(self):
t = table("t", column("x"))
a = t.alias()
for obj in [t, t.c.x, a, t.c.x > 1, (t.c.x > 1).label(None)]:
annot = obj._annotate({})
eq_({obj}, {annot})
def test_clone_annotations_dont_hash(self):
t = table("t", column("x"))
s = t.select()
a = t.alias()
s2 = a.select()
for obj in [s, s2]:
annot = obj._annotate({})
ne_({obj}, {annot})
def test_replacement_traverse_preserve(self):
"""test that replacement traverse that hits an unannotated column
does not use it when replacing an annotated column.
this requires that replacement traverse store elements in the
"seen" hash based on id(), not hash.
"""
t = table("t", column("x"))
stmt = select(t.c.x)
whereclause = annotation._deep_annotate(t.c.x == 5, {"foo": "bar"})
eq_(whereclause._annotations, {"foo": "bar"})
eq_(whereclause.left._annotations, {"foo": "bar"})
eq_(whereclause.right._annotations, {"foo": "bar"})
stmt = stmt.where(whereclause)
s2 = visitors.replacement_traverse(stmt, {}, lambda elem: None)
whereclause = s2._where_criteria[0]
eq_(whereclause._annotations, {"foo": "bar"})
eq_(whereclause.left._annotations, {"foo": "bar"})
eq_(whereclause.right._annotations, {"foo": "bar"})
@testing.combinations(True, False, None)
def test_setup_inherit_cache(self, inherit_cache_value):
if inherit_cache_value is None:
class MyInsertThing(Insert):
pass
else:
class MyInsertThing(Insert):
inherit_cache = inherit_cache_value
t = table("t", column("x"))
anno = MyInsertThing(t)._annotate({"foo": "bar"})
if inherit_cache_value is not None:
is_(type(anno).__dict__["inherit_cache"], inherit_cache_value)
else:
assert "inherit_cache" not in type(anno).__dict__
def test_proxy_set_iteration_includes_annotated(self):
from sqlalchemy.schema import Column
c1 = Column("foo", Integer)
stmt = select(c1).alias()
proxy = stmt.c.foo
proxy.proxy_set
# create an annotated copy of the column
p2 = proxy._annotate({"weight": 10})
# now see if our annotated version is in that column's
# proxy_set, as corresponding_column iterates through proxy_set
# in this way
d = {}
for col in p2._uncached_proxy_list():
d.update(col._annotations)
eq_(d, {"weight": 10})
def test_proxy_set_iteration_includes_annotated_two(self):
from sqlalchemy.schema import Column
c1 = Column("foo", Integer)
stmt = select(c1).alias()
proxy = stmt.c.foo
c1.proxy_set
proxy._proxies = [c1._annotate({"weight": 10})]
d = {}
for col in proxy._uncached_proxy_list():
d.update(col._annotations)
eq_(d, {"weight": 10})
def test_late_name_add(self):
from sqlalchemy.schema import Column
c1 = Column(Integer)
c1_a = c1._annotate({"foo": "bar"})
c1.name = "somename"
eq_(c1_a.name, "somename")
def test_late_table_add(self):
c1 = Column("foo", Integer)
c1_a = c1._annotate({"foo": "bar"})
t = Table("t", MetaData(), c1)
is_(c1_a.table, t)
def test_basic_attrs(self):
t = Table(
"t",
MetaData(),
Column("x", Integer, info={"q": "p"}),
Column("y", Integer, key="q"),
)
x_a = t.c.x._annotate({})
y_a = t.c.q._annotate({})
t.c.x.info["z"] = "h"
eq_(y_a.key, "q")
is_(x_a.table, t)
eq_(x_a.info, {"q": "p", "z": "h"})
eq_(t.c.x._anon_name_label, x_a._anon_name_label)
def test_custom_constructions(self):
from sqlalchemy.schema import Column
class MyColumn(Column):
def __init__(self):
Column.__init__(self, "foo", Integer)
_constructor = Column
t1 = Table("t1", MetaData(), MyColumn())
s1 = t1.select().subquery()
assert isinstance(t1.c.foo, MyColumn)
assert isinstance(s1.c.foo, Column)
annot_1 = t1.c.foo._annotate({})
s2 = select(annot_1).subquery()
assert isinstance(s2.c.foo, Column)
annot_2 = s1._annotate({})
assert isinstance(annot_2.c.foo, Column)
def test_custom_construction_correct_anno_subclass(self):
# [ticket:2918]
from sqlalchemy.schema import Column
from sqlalchemy.sql.elements import AnnotatedColumnElement
class MyColumn(Column):
pass
assert isinstance(
MyColumn("x", Integer)._annotate({"foo": "bar"}),
AnnotatedColumnElement,
)
def test_custom_construction_correct_anno_expr(self):
# [ticket:2918]
from sqlalchemy.schema import Column
class MyColumn(Column):
pass
col = MyColumn("x", Integer)
col == 5
col_anno = MyColumn("x", Integer)._annotate({"foo": "bar"})
binary_2 = col_anno == 5
eq_(binary_2.left._annotations, {"foo": "bar"})
@testing.combinations(
("plain",),
("annotated",),
("deep_annotated",),
("deep_annotated_w_ind_col",),
argnames="testcase",
)
def test_annotated_corresponding_column(self, testcase):
"""ensures the require_embedded case remains when an inner statement
was copied out for annotations.
First implemented in 2008 in d3621ae961a, the implementation is
updated for #8796 as a performance improvement as well as to
establish a discovered implicit behavior where clone() would break
the contract of corresponding_column() into an explicit option,
fixing the implicit behavior.
"""
table1 = table("table1", column("col1"))
s1 = select(table1.c.col1).subquery()
expect_same = True
if testcase == "plain":
t1 = s1
elif testcase == "annotated":
t1 = s1._annotate({})
elif testcase == "deep_annotated":
# was failing prior to #8796
t1 = sql_util._deep_annotate(s1, {"foo": "bar"})
elif testcase == "deep_annotated_w_ind_col":
# was implicit behavior w/ annotate prior to #8796
t1 = sql_util._deep_annotate(
s1, {"foo": "bar"}, ind_cols_on_fromclause=True
)
expect_same = False
else:
assert False
# t1 needs to share the same _make_proxy() columns as t2, even
# though it's annotated. otherwise paths will diverge once they
# are corresponded against "inner" below.
if expect_same:
assert t1.c is s1.c
assert t1.c.col1 is s1.c.col1
else:
assert t1.c is not s1.c
assert t1.c.col1 is not s1.c.col1
inner = select(s1).subquery()
assert (
inner.corresponding_column(t1.c.col1, require_embedded=False)
is inner.c.col1
)
if expect_same:
assert (
inner.corresponding_column(t1.c.col1, require_embedded=True)
is inner.c.col1
)
else:
assert (
inner.corresponding_column(t1.c.col1, require_embedded=True)
is not inner.c.col1
)
def test_annotated_visit(self):
table1 = table("table1", column("col1"), column("col2"))
bin_ = table1.c.col1 == bindparam("foo", value=None)
assert str(bin_) == "table1.col1 = :foo"
def visit_binary(b):
b.right = table1.c.col2
b2 = visitors.cloned_traverse(bin_, {}, {"binary": visit_binary})
assert str(b2) == "table1.col1 = table1.col2"
b3 = visitors.cloned_traverse(
bin_._annotate({}), {}, {"binary": visit_binary}
)
assert str(b3) == "table1.col1 = table1.col2"
def visit_binary(b):
b.left = bindparam("bar")
b4 = visitors.cloned_traverse(b2, {}, {"binary": visit_binary})
assert str(b4) == ":bar = table1.col2"
b5 = visitors.cloned_traverse(b3, {}, {"binary": visit_binary})
assert str(b5) == ":bar = table1.col2"
def test_label_accessors(self):
t1 = table("t1", column("c1"))
l1 = t1.c.c1.label(None)
is_(l1._order_by_label_element, l1)
l1a = l1._annotate({"foo": "bar"})
is_(l1a._order_by_label_element, l1a)
def test_annotate_aliased(self):
t1 = table("t1", column("c1"))
s = select((t1.c.c1 + 3).label("bat"))
a = s.alias()
a = sql_util._deep_annotate(a, {"foo": "bar"})
eq_(a._annotations["foo"], "bar")
eq_(a.element._annotations["foo"], "bar")
def test_annotate_expressions(self):
table1 = table("table1", column("col1"), column("col2"))
for expr, expected in [
(table1.c.col1, "table1.col1"),
(table1.c.col1 == 5, "table1.col1 = :col1_1"),
(
table1.c.col1.in_([2, 3, 4]),
"table1.col1 IN (__[POSTCOMPILE_col1_1])",
),
]:
eq_(str(expr), expected)
eq_(str(expr._annotate({})), expected)
eq_(str(sql_util._deep_annotate(expr, {})), expected)
eq_(
str(
sql_util._deep_annotate(expr, {}, exclude=[table1.c.col1])
),
expected,
)
def test_deannotate_wrapping(self):
table1 = table("table1", column("col1"), column("col2"))
bin_ = table1.c.col1 == bindparam("foo", value=None)
b2 = sql_util._deep_annotate(bin_, {"_orm_adapt": True})
b3 = sql_util._deep_deannotate(b2)
b4 = sql_util._deep_deannotate(bin_)
for elem in (b2._annotations, b2.left._annotations):
in_("_orm_adapt", elem)
for elem in (
b3._annotations,
b3.left._annotations,
b4._annotations,
b4.left._annotations,
):
eq_(elem, {})
is_not(b2.left, bin_.left)
is_not(b3.left, b2.left)
is_not(b2.left, bin_.left)
is_(b4.left, bin_.left) # since column is immutable
# deannotate copies the element
is_not(bin_.right, b2.right)
is_not(b2.right, b3.right)
is_not(b3.right, b4.right)
def test_deannotate_clone(self):
table1 = table("table1", column("col1"), column("col2"))
subq = (
select(table1).where(table1.c.col1 == bindparam("foo")).subquery()
)
stmt = select(subq)
s2 = sql_util._deep_annotate(stmt, {"_orm_adapt": True})
s3 = sql_util._deep_deannotate(s2)
s4 = sql_util._deep_deannotate(s3)
eq_(stmt._annotations, {})
eq_(subq._annotations, {})
eq_(s2._annotations, {"_orm_adapt": True})
eq_(s3._annotations, {})
eq_(s4._annotations, {})
# select._raw_columns[0] is the subq object
eq_(s2._raw_columns[0]._annotations, {"_orm_adapt": True})
eq_(s3._raw_columns[0]._annotations, {})
eq_(s4._raw_columns[0]._annotations, {})
is_not(s3, s2)
is_not(s4, s3) # deep deannotate makes a clone unconditionally
is_(s3._deannotate(), s3) # regular deannotate returns same object
def test_annotate_unique_traversal(self):
"""test that items are copied only once during
annotate, deannotate traversal
#2453 - however note this was modified by
#1401, and it's likely that re49563072578
is helping us with the str() comparison
case now, as deannotate is making
clones again in some cases.
"""
table1 = table("table1", column("x"))
table2 = table("table2", column("y"))
a1 = table1.alias()
s = select(a1.c.x).select_from(a1.join(table2, a1.c.x == table2.c.y))
for sel in (
sql_util._deep_deannotate(s),
visitors.cloned_traverse(s, {}, {}),
visitors.replacement_traverse(s, {}, lambda x: None),
):
# the columns clause isn't changed at all
assert sel._raw_columns[0].table is a1
froms = list(sel._iterate_from_elements())
assert froms[0].element is froms[1].left.element
eq_(str(s), str(sel))
# when we are modifying annotation sets only
# partially, elements are copied uniquely based on id().
# this is new as of 1.4; previously they'd be copied every time
for sel in (
sql_util._deep_deannotate(s, {"foo": "bar"}),
sql_util._deep_annotate(s, {"foo": "bar"}),
):
froms = list(sel._iterate_from_elements())
assert froms[0] is not froms[1].left
# but things still work out due to
# re49563072578
eq_(str(s), str(sel))
def test_annotate_varied_annot_same_col(self):
"""test two instances of the same column with different annotations
preserving them when deep_annotate is run on them.
"""
t1 = table("table1", column("col1"), column("col2"))
s = select(t1.c.col1._annotate({"foo": "bar"}))
s2 = select(t1.c.col1._annotate({"bat": "hoho"}))
s3 = s.union(s2)
sel = sql_util._deep_annotate(s3, {"new": "thing"})
eq_(
sel.selects[0]._raw_columns[0]._annotations,
{"foo": "bar", "new": "thing"},
)
eq_(
sel.selects[1]._raw_columns[0]._annotations,
{"bat": "hoho", "new": "thing"},
)
def test_deannotate_2(self):
table1 = table("table1", column("col1"), column("col2"))
j = table1.c.col1._annotate(
{"remote": True}
) == table1.c.col2._annotate({"local": True})
j2 = sql_util._deep_deannotate(j)
eq_(j.left._annotations, {"remote": True})
eq_(j2.left._annotations, {})
def test_deannotate_3(self):
table1 = table(
"table1",
column("col1"),
column("col2"),
column("col3"),
column("col4"),
)
j = and_(
table1.c.col1._annotate({"remote": True})
== table1.c.col2._annotate({"local": True}),
table1.c.col3._annotate({"remote": True})
== table1.c.col4._annotate({"local": True}),
)
j2 = sql_util._deep_deannotate(j)
eq_(j.clauses[0].left._annotations, {"remote": True})
eq_(j2.clauses[0].left._annotations, {})
def test_annotate_fromlist_preservation(self):
"""test the FROM list in select still works
even when multiple annotate runs have created
copies of the same selectable
#2453, continued
"""
table1 = table("table1", column("x"))
table2 = table("table2", column("y"))
a1 = table1.alias()
s = select(a1.c.x).select_from(a1.join(table2, a1.c.x == table2.c.y))
assert_s = select(select(s.subquery()).subquery())
for fn in (
sql_util._deep_deannotate,
lambda s: sql_util._deep_annotate(s, {"foo": "bar"}),
lambda s: visitors.cloned_traverse(s, {}, {}),
lambda s: visitors.replacement_traverse(s, {}, lambda x: None),
):
sel = fn(select(fn(select(fn(s.subquery())).subquery())))
eq_(str(assert_s), str(sel))
def test_bind_unique_test(self):
table("t", column("a"), column("b"))
b = bindparam("bind", value="x", unique=True)
# the annotation of "b" should render the
# same. The "unique" test in compiler should
# also pass, [ticket:2425]
eq_(str(or_(b, b._annotate({"foo": "bar"}))), ":bind_1 OR :bind_1")
def test_comparators_cleaned_out_construction(self):
c = column("a")
comp1 = c.comparator
c1 = c._annotate({"foo": "bar"})
comp2 = c1.comparator
assert comp1 is not comp2
def test_comparators_cleaned_out_reannotate(self):
c = column("a")
c1 = c._annotate({"foo": "bar"})
comp1 = c1.comparator
c2 = c1._annotate({"bat": "hoho"})
comp2 = c2.comparator
assert comp1 is not comp2
def test_comparator_cleanout_integration(self):
c = column("a")
c1 = c._annotate({"foo": "bar"})
c1.comparator
c2 = c1._annotate({"bat": "hoho"})
c2.comparator
assert (c2 == 5).left._annotations == {"foo": "bar", "bat": "hoho"}
class ReprTest(fixtures.TestBase):
def test_ensure_repr_elements(self):
for obj in [
elements.Cast(1, Integer()),
elements.TypeClause(String()),
elements.ColumnClause("x"),
elements.BindParameter("q"),
elements.Null(),
elements.True_(),
elements.False_(),
elements.ClauseList(),
elements.BooleanClauseList._construct_raw(operators.and_),
elements.BooleanClauseList._construct_raw(operators.or_),
elements.Tuple(),
elements.Case(),
elements.Extract("foo", column("x")),
elements.UnaryExpression(column("x")),
elements.Grouping(column("x")),
elements.Over(func.foo()),
elements.Label("q", column("x")),
]:
repr(obj)
class WithLabelsTest(AssertsCompiledSQL, fixtures.TestBase):
def _assert_result_keys(self, s, keys):
compiled = s.compile()
eq_(set(compiled._create_result_map()), set(keys))
def _assert_subq_result_keys(self, s, keys):
compiled = s.subquery().select().compile()
eq_(set(compiled._create_result_map()), set(keys))
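# helpers above: _assert_result_keys checks the compiled statement's
# result-map keys; _assert_subq_result_keys does the same after wrapping
# the statement in a subquery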
def _names_overlap(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer))
t2 = Table("t2", m, Column("x", Integer))
return select(t1, t2).set_label_style(LABEL_STYLE_NONE)
def test_names_overlap_nolabel(self):
sel = self._names_overlap()
self._assert_result_keys(sel, ["x"])
self._assert_subq_result_keys(sel, ["x", "x_1"])
eq_(sel.selected_columns.keys(), ["x", "x"])
def test_names_overlap_label(self):
sel = self._names_overlap().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(sel.selected_columns.keys(), ["t1_x", "t2_x"])
eq_(list(sel.selected_columns.keys()), ["t1_x", "t2_x"])
eq_(list(sel.subquery().c.keys()), ["t1_x", "t2_x"])
self._assert_result_keys(sel, ["t1_x", "t2_x"])
def _names_overlap_keys_dont(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer, key="a"))
t2 = Table("t2", m, Column("x", Integer, key="b"))
return select(t1, t2).set_label_style(LABEL_STYLE_NONE)
def test_names_overlap_keys_dont_nolabel(self):
sel = self._names_overlap_keys_dont()
eq_(sel.selected_columns.keys(), ["a", "b"])
eq_(list(sel.selected_columns.keys()), ["a", "b"])
eq_(list(sel.subquery().c.keys()), ["a", "b"])
self._assert_result_keys(sel, ["x"])
def test_names_overlap_keys_dont_label(self):
sel = self._names_overlap_keys_dont().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(sel.selected_columns.keys(), ["t1_a", "t2_b"])
eq_(list(sel.selected_columns.keys()), ["t1_a", "t2_b"])
eq_(list(sel.subquery().c.keys()), ["t1_a", "t2_b"])
self._assert_result_keys(sel, ["t1_x", "t2_x"])
def _columns_repeated(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), Column("y", Integer))
return select(t1.c.x, t1.c.y, t1.c.x).set_label_style(LABEL_STYLE_NONE)
def test_element_repeated_nolabels(self):
sel = self._columns_repeated().set_label_style(LABEL_STYLE_NONE)
eq_(sel.selected_columns.keys(), ["x", "y", "x"])
eq_(list(sel.selected_columns.keys()), ["x", "y", "x"])
eq_(list(sel.subquery().c.keys()), ["x", "y", "x_1"])
self._assert_result_keys(sel, ["x", "y"])
def test_element_repeated_disambiguate(self):
sel = self._columns_repeated().set_label_style(
LABEL_STYLE_DISAMBIGUATE_ONLY
)
eq_(sel.selected_columns.keys(), ["x", "y", "x_1"])
eq_(list(sel.selected_columns.keys()), ["x", "y", "x_1"])
eq_(list(sel.subquery().c.keys()), ["x", "y", "x_1"])
self._assert_result_keys(sel, ["x", "y", "x__1"])
def test_element_repeated_labels(self):
sel = self._columns_repeated().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(sel.selected_columns.keys(), ["t1_x", "t1_y", "t1_x_1"])
eq_(list(sel.selected_columns.keys()), ["t1_x", "t1_y", "t1_x_1"])
eq_(list(sel.subquery().c.keys()), ["t1_x", "t1_y", "t1_x_1"])
self._assert_result_keys(sel, ["t1_x__1", "t1_x", "t1_y"])
def _columns_repeated_identity(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), Column("y", Integer))
return select(t1.c.x, t1.c.y, t1.c.x, t1.c.x, t1.c.x).set_label_style(
LABEL_STYLE_NONE
)
def _anon_columns_repeated_identity_one(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), Column("y", Integer))
return select(t1.c.x, null(), null(), null()).set_label_style(
LABEL_STYLE_NONE
)
def _anon_columns_repeated_identity_two(self):
fn = func.now()
return select(fn, fn, fn, fn).set_label_style(LABEL_STYLE_NONE)
def test_columns_repeated_identity_disambiguate(self):
"""test #7153"""
sel = self._columns_repeated_identity().set_label_style(
LABEL_STYLE_DISAMBIGUATE_ONLY
)
self.assert_compile(
sel,
"SELECT t1.x, t1.y, t1.x AS x__1, t1.x AS x__2, "
"t1.x AS x__3 FROM t1",
)
def test_columns_repeated_identity_subquery_disambiguate(self):
"""test #7153"""
sel = self._columns_repeated_identity()
stmt = select(sel.subquery()).set_label_style(
LABEL_STYLE_DISAMBIGUATE_ONLY
)
# databases like MySQL won't allow the subquery to have repeated labels
# even if we don't try to access them
self.assert_compile(
stmt,
"SELECT anon_1.x, anon_1.y, anon_1.x AS x_1, anon_1.x AS x_2, "
"anon_1.x AS x_3 FROM "
"(SELECT t1.x AS x, t1.y AS y, t1.x AS x__1, t1.x AS x__2, "
"t1.x AS x__3 FROM t1) AS anon_1",
)
def _labels_overlap(self):
m = MetaData()
t1 = Table("t", m, Column("x_id", Integer))
t2 = Table("t_x", m, Column("id", Integer))
return select(t1, t2)
def test_labels_overlap_nolabel(self):
sel = self._labels_overlap()
eq_(sel.selected_columns.keys(), ["x_id", "id"])
eq_(list(sel.selected_columns.keys()), ["x_id", "id"])
eq_(list(sel.subquery().c.keys()), ["x_id", "id"])
self._assert_result_keys(sel, ["x_id", "id"])
def test_labels_overlap_label(self):
sel = self._labels_overlap().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(
list(sel.selected_columns.keys()),
["t_x_id", "t_x_id_1"],
)
eq_(
list(sel.subquery().c.keys()),
["t_x_id", "t_x_id_1"],
# ["t_x_id", "t_x_id"] # if we turn off deduping entirely,
)
self._assert_result_keys(sel, ["t_x_id", "t_x_id_1"])
self._assert_subq_result_keys(sel, ["t_x_id", "t_x_id_1"])
def _labels_overlap_keylabels_dont(self):
m = MetaData()
t1 = Table("t", m, Column("x_id", Integer, key="a"))
t2 = Table("t_x", m, Column("id", Integer, key="b"))
return select(t1, t2)
def test_labels_overlap_keylabels_dont_nolabel(self):
sel = self._labels_overlap_keylabels_dont()
eq_(list(sel.selected_columns.keys()), ["a", "b"])
eq_(list(sel.subquery().c.keys()), ["a", "b"])
self._assert_result_keys(sel, ["x_id", "id"])
def test_labels_overlap_keylabels_dont_label(self):
sel = self._labels_overlap_keylabels_dont().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(list(sel.selected_columns.keys()), ["t_a", "t_x_b"])
eq_(list(sel.subquery().c.keys()), ["t_a", "t_x_b"])
self._assert_result_keys(sel, ["t_x_id", "t_x_id_1"])
def _keylabels_overlap_labels_dont(self):
m = MetaData()
t1 = Table("t", m, Column("a", Integer, key="x_id"))
t2 = Table("t_x", m, Column("b", Integer, key="id"))
return select(t1, t2)
def test_keylabels_overlap_labels_dont_nolabel(self):
sel = self._keylabels_overlap_labels_dont()
eq_(list(sel.selected_columns.keys()), ["x_id", "id"])
eq_(list(sel.subquery().c.keys()), ["x_id", "id"])
self._assert_result_keys(sel, ["a", "b"])
def test_keylabels_overlap_labels_dont_label(self):
sel = self._keylabels_overlap_labels_dont().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(
list(sel.selected_columns.keys()),
["t_x_id", "t_x_id_1"],
)
eq_(
list(sel.subquery().c.keys()),
["t_x_id", "t_x_id_1"],
)
self._assert_result_keys(sel, ["t_a", "t_x_b"])
self._assert_subq_result_keys(sel, ["t_a", "t_x_b"])
def _keylabels_overlap_labels_overlap(self):
m = MetaData()
t1 = Table("t", m, Column("x_id", Integer, key="x_a"))
t2 = Table("t_x", m, Column("id", Integer, key="a"))
return select(t1, t2)
def test_keylabels_overlap_labels_overlap_nolabel(self):
sel = self._keylabels_overlap_labels_overlap()
eq_(list(sel.selected_columns.keys()), ["x_a", "a"])
eq_(list(sel.subquery().c.keys()), ["x_a", "a"])
self._assert_result_keys(sel, ["x_id", "id"])
self._assert_subq_result_keys(sel, ["x_id", "id"])
def test_keylabels_overlap_labels_overlap_label(self):
sel = self._keylabels_overlap_labels_overlap().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(
list(sel.selected_columns.keys()),
["t_x_a", "t_x_a_1"],
)
# deduping for different cols but same label
eq_(list(sel.subquery().c.keys()), ["t_x_a", "t_x_a_1"])
# if we turn off deduping entirely
# eq_(list(sel.subquery().c.keys()), ["t_x_a", "t_x_a"])
self._assert_result_keys(sel, ["t_x_id", "t_x_id_1"])
self._assert_subq_result_keys(sel, ["t_x_id", "t_x_id_1"])
def _keys_overlap_names_dont(self):
m = MetaData()
t1 = Table("t1", m, Column("a", Integer, key="x"))
t2 = Table("t2", m, Column("b", Integer, key="x"))
return select(t1, t2)
def test_keys_overlap_names_dont_nolabel(self):
sel = self._keys_overlap_names_dont()
eq_(sel.selected_columns.keys(), ["x", "x_1"])
self._assert_result_keys(sel, ["a", "b"])
def test_keys_overlap_names_dont_label(self):
sel = self._keys_overlap_names_dont().set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
eq_(list(sel.selected_columns.keys()), ["t1_x", "t2_x"])
eq_(list(sel.subquery().c.keys()), ["t1_x", "t2_x"])
self._assert_result_keys(sel, ["t1_a", "t2_b"])
class ResultMapTest(fixtures.TestBase):
def _fixture(self):
m = MetaData()
t = Table("t", m, Column("x", Integer), Column("y", Integer))
return t
def _mapping(self, stmt):
compiled = stmt.compile()
return {
elem: key
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
}
def test_select_label_alt_name(self):
t = self._fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = select(l1, l2)
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_label_alt_name(self):
t = self._fixture()
l1, l2 = t.c.x.label("a"), t.c.y.label("b")
s = select(l1, l2).alias()
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_column(self):
t = self._fixture()
x, y = t.c.x, t.c.y
s = select(x, y).alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_alias_column_apply_labels(self):
t = self._fixture()
x, y = t.c.x, t.c.y
s = (
select(x, y)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias()
)
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_table_alias_column(self):
t = self._fixture()
x = t.c.x
ta = t.alias()
s = select(ta.c.x, ta.c.y)
mapping = self._mapping(s)
assert x not in mapping
def test_select_label_alt_name_table_alias_column(self):
t = self._fixture()
x = t.c.x
ta = t.alias()
l1, l2 = ta.c.x.label("a"), ta.c.y.label("b")
s = select(l1, l2)
mapping = self._mapping(s)
assert x not in mapping
assert l1 in mapping
assert ta.c.x not in mapping
def test_column_subquery_exists(self):
t = self._fixture()
s = exists().where(t.c.x == 5).select()
mapping = self._mapping(s)
assert t.c.x not in mapping
eq_(
[type(entry[-1]) for entry in s.compile()._result_columns],
[Boolean],
)
def test_plain_exists(self):
expr = exists(text("1"))
eq_(type(expr.type), Boolean)
eq_(
[
type(entry[-1])
for entry in select(expr).compile()._result_columns
],
[Boolean],
)
def test_plain_exists_negate(self):
expr = ~exists(text("1"))
eq_(type(expr.type), Boolean)
eq_(
[
type(entry[-1])
for entry in select(expr).compile()._result_columns
],
[Boolean],
)
def test_plain_exists_double_negate(self):
expr = ~(~exists(text("1")))
eq_(type(expr.type), Boolean)
eq_(
[
type(entry[-1])
for entry in select(expr).compile()._result_columns
],
[Boolean],
)
def test_column_subquery_plain(self):
t = self._fixture()
s1 = select(t.c.x).where(t.c.x > 5).scalar_subquery()
s2 = select(s1)
mapping = self._mapping(s2)
assert t.c.x not in mapping
assert s1 in mapping
eq_(
[type(entry[-1]) for entry in s2.compile()._result_columns],
[Integer],
)
def test_unary_boolean(self):
s1 = select(not_(True)).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
eq_(
[type(entry[-1]) for entry in s1.compile()._result_columns],
[Boolean],
)
class ForUpdateTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic_clone(self):
t = table("t", column("c"))
s = select(t).with_for_update(read=True, of=t.c.c)
s2 = visitors.ReplacingCloningVisitor().traverse(s)
assert s2._for_update_arg is not s._for_update_arg
eq_(s2._for_update_arg.read, True)
eq_(s2._for_update_arg.of, [t.c.c])
self.assert_compile(
s2, "SELECT t.c FROM t FOR SHARE OF t", dialect="postgresql"
)
def test_adapt(self):
t = table("t", column("c"))
s = select(t).with_for_update(read=True, of=t.c.c)
a = t.alias()
s2 = sql_util.ClauseAdapter(a).traverse(s)
eq_(s2._for_update_arg.of, [a.c.c])
self.assert_compile(
s2,
"SELECT t_1.c FROM t AS t_1 FOR SHARE OF t_1",
dialect="postgresql",
)
class AliasTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_direct_element_hierarchy(self):
t = table("t", column("c"))
a1 = t.alias()
a2 = a1.alias()
a3 = a2.alias()
is_(a1.element, t)
is_(a2.element, a1)
is_(a3.element, a2)
def test_get_children_preserves_multiple_nesting(self):
t = table("t", column("c"))
stmt = select(t)
a1 = stmt.alias()
a2 = a1.alias()
eq_(set(a2.get_children(column_collections=False)), {a1})
def test_correspondence_multiple_nesting(self):
t = table("t", column("c"))
stmt = select(t)
a1 = stmt.alias()
a2 = a1.alias()
is_(a1.corresponding_column(a2.c.c), a1.c.c)
def test_copy_internals_multiple_nesting(self):
t = table("t", column("c"))
stmt = select(t)
a1 = stmt.alias()
a2 = a1.alias()
a3 = a2._clone()
a3._copy_internals()
is_(a1.corresponding_column(a3.c.c), a1.c.c)
|
842339112c11e4a344eae7864832fc99e0135b2c
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/hqadmin/management/commands/corrupt_couch_nodes.py
|
8a103dd00a20d3e4fd8c5919c789b383e124a3a5
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,040
|
py
|
corrupt_couch_nodes.py
|
import logging
import sys
from urllib.parse import urlparse, urlunparse
from django.conf import settings
from django.core.management.base import BaseCommand
from couchdbkit import Database
from dimagi.utils.couch.database import retry_on_couch_error
from .corrupt_couch import setup_logging
from ...corrupt_couch import DOC_TYPES_BY_NAME
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = """Query stand-alone couch nodes for missing document ids.
Exhaustively and efficiently find missing documents for an
(optional) range of ids by running against stand-alone (non-
clustered) couch nodes that have snapshot copies of the data from a
corrupt cluster. Multiple instances of this command can be run
simultaneously with different ranges.
"""
def add_arguments(self, parser):
parser.add_argument('nodes', help="comma-delimited list of node IP:PORT pairs")
parser.add_argument('doc_name', choices=list(DOC_TYPES_BY_NAME), help="""
Used to choose a database in which to find missing documents.
""")
parser.add_argument('--range', dest="id_range", help="Doc id range: XXXX..ZZZZ")
parser.add_argument(
'--check-node-integrity',
dest="check",
action="store_true",
help="""Verify that each node returns consistent results."""
)
parser.add_argument('--verbose', action="store_true")
def handle(self, nodes, doc_name, id_range, **options):
setup_logging(options["verbose"])
id_range = id_range.split("..", 1) if id_range else ("", "")
dbname = get_dbname(doc_name)
dbs = get_node_dbs(nodes.split(","), dbname)
run = check_node_integrity if options["check"] else print_missing_ids
try:
run(dbs, id_range)
except KeyboardInterrupt:
log.info("abort.")
finally:
sys.stdout.flush()
def get_dbname(doc_name):
db = DOC_TYPES_BY_NAME[doc_name]["type"].get_db()
return db.dbname
def get_node_dbs(nodes, dbname, username="admin"):
def node_url(proxy_url, node):
return urlunparse(proxy_url._replace(netloc=f'{auth}@{node}'))
proxy_url = urlparse(settings.COUCH_DATABASE)._replace(path=f"/{dbname}")
auth = username + ":" + proxy_url.netloc.split('@')[0].split(":", 1)[1]
return [Database(node_url(proxy_url, node)) for node in nodes]
def print_missing_ids(*args):
for doc_id in iter_missing_ids(*args):
print(doc_id)
def iter_missing_ids(dbs, id_range, chunk_size=10000):
next_id, end_id = id_range
db0, *other_dbs = dbs
drop = False
log.info(f"scan range: {next_id}..{end_id}")
while True:
db0_ids = query_ids(db0, (next_id, end_id), chunk_size)
last_id = max(db0_ids) if db0_ids else {}
id_sets = query_dbs(other_dbs, (next_id, last_id))
id_sets.append(db0_ids)
if drop:
for ids in id_sets:
ids.discard(next_id)
else:
drop = True
if not any(id_sets):
log.info(f"final range: {next_id}..{last_id}")
break
missing = find_missing(id_sets)
log.info(f"..{last_id} => {len(missing)}")
yield from missing
next_id = last_id
@retry_on_couch_error
def query_ids(db, id_range, limit=None):
start_id, end_id = id_range
view_kwargs = {"include_docs": False, "reduce": False}
if start_id:
view_kwargs["startkey"] = start_id
if end_id:
view_kwargs["endkey"] = end_id
if limit:
view_kwargs["limit"] = limit
return {rec["id"] for rec in db.view("_all_docs", **view_kwargs)}
def query_dbs(dbs, *args, **kw):
return [query_ids(db, *args, **kw) for db in dbs]
def find_missing(id_sets):
"""Find ids not present in all sets of ids"""
return set.union(*id_sets) - set.intersection(*id_sets)
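# Worked illustration of find_missing (made-up ids): for
# id_sets = [{"a", "b"}, {"a", "c"}], the union is {"a", "b", "c"} and the
# intersection is {"a"}, so the result is {"b", "c"} -- the ids that at least
# one node is missing.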
def check_node_integrity(dbs, id_range, chunk_size=10000, min_tries=50):
"""Check each db node for consistent results over a given id range"""
start_id, end_id = id_range
next_ids = [start_id] * len(dbs)
while True:
for i, db in enumerate(dbs):
uri = db.uri.rsplit("@")[-1]
next_id = next_ids[i]
log.info(f"checking {chunk_size} ids on {uri} starting at {next_id}...")
reference = None
for x in range(min_tries):
ids = query_ids(db, (next_id, end_id), chunk_size)
if not ids or len(ids) == 1:
log.info(f"empty set: {next_id} - {end_id}")
assert reference is None, sorted(reference)[:10]
return
if reference is None:
reference = ids
continue
if ids != reference:
log.warning(f"integrity violation: {uri} on {x + 1} tries")
log.debug("diff: %s", reference ^ ids)
break
next_ids[i] = max(reference)
|
0985719395aec6292cf1d8694bc62763b8d04104
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/StyTr-2/src/utils/function.py
|
0e4dc13c99d1fdb2e325925fad1545dcbbc9bc35
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
function.py
|
from PIL import Image
import mindspore
from mindspore import ops
def calc_mean_std(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.shape
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(axis=2) + eps
sqrt = ops.Sqrt()
feat_var = sqrt(feat_var)
feat_std = feat_var.view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(axis=2).view(N, C, 1, 1)
return feat_mean, feat_std
def calc_mean_std1(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
feat_var = feat.var(axis=0) + eps
sqrt = ops.Sqrt()
feat_std = sqrt(feat_var)
feat_mean = feat.mean(axis=0)
return feat_mean, feat_std
def normal(feat, eps=1e-5):
feat_mean, feat_std = calc_mean_std(feat, eps)
normalized = (feat - feat_mean) / feat_std
return normalized
def normal_style(feat, eps=1e-5):
feat_mean, feat_std = calc_mean_std1(feat, eps)
normalized = (feat - feat_mean) / feat_std
return normalized
def Msave_image(images, name):
mul = ops.Mul()
add = ops.Add()
cast = ops.Cast()
transpose = ops.Transpose()
B, _, H, W = images.shape
newimg = Image.new('RGB', (B * H, W), (255, 0, 0))
i = 0
for img in images:
tmp1 = mul(img, 255)
tmp2 = add(tmp1, 0.5)
tmp3 = ops.clip_by_value(tmp2, 0, 255)
tmp4 = transpose(tmp3, (1, 2, 0))
tmp5 = cast(tmp4, mindspore.uint8)
tmp6 = tmp5.asnumpy()
im = Image.fromarray(tmp6)
newimg.paste(im, (i * W, 0))
i += 1
newimg.save(name)
|
cddda225fddd95abb879d996aee97098a484b7d0
|
4b7b459cbc4b52af66330fa20584ea8a35d4e368
|
/src/middlewared/middlewared/pytest/unit/plugins/enclosure/test_slot_mappings.py
|
b49a5d88a69e653cf6331af67df3632a6a0f0261
|
[] |
no_license
|
truenas/middleware
|
563ba85bbc7de6475e9d301e769a75af08b78955
|
4717cbc4560a0a1e47acff5faf4bbff327f7a55a
|
refs/heads/master
| 2023-09-04T03:09:07.016897
| 2023-09-02T00:21:15
| 2023-09-02T00:24:17
| 8,627,760
| 446
| 105
| null | 2023-09-14T20:20:51
| 2013-03-07T13:23:20
|
Python
|
UTF-8
|
Python
| false
| false
| 13,719
|
py
|
test_slot_mappings.py
|
import pytest
from middlewared.plugins.enclosure_ import slot_mappings
@pytest.mark.parametrize('data', [
('R50', {
'any_version': True,
'versions': {
'DEFAULT': {
'product': {
'eDrawer4048S1': {
i: {'orig_slot': i, 'mapped_slot': i} for i in range(1, 25)
},
'eDrawer4048S2': {
i: {'orig_slot': i, 'mapped_slot': j} for i, j in zip(range(1, 25), range(25, 49))
}
},
'id': {
'r50_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
3: {'orig_slot': 3, 'mapped_slot': 51},
},
'r50b_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
},
'r50bm_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
3: {'orig_slot': 3, 'mapped_slot': 51},
4: {'orig_slot': 4, 'mapped_slot': 52},
}
}
}
}
}),
('R50B', {
'any_version': True,
'versions': {
'DEFAULT': {
'product': {
'eDrawer4048S1': {
i: {'orig_slot': i, 'mapped_slot': i} for i in range(1, 25)
},
'eDrawer4048S2': {
i: {'orig_slot': i, 'mapped_slot': j} for i, j in zip(range(1, 25), range(25, 49))
}
},
'id': {
'r50_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
3: {'orig_slot': 3, 'mapped_slot': 51},
},
'r50b_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
},
'r50bm_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
3: {'orig_slot': 3, 'mapped_slot': 51},
4: {'orig_slot': 4, 'mapped_slot': 52},
}
}
}
}
}),
('R50BM', {
'any_version': True,
'versions': {
'DEFAULT': {
'product': {
'eDrawer4048S1': {
i: {'orig_slot': i, 'mapped_slot': i} for i in range(1, 25)
},
'eDrawer4048S2': {
i: {'orig_slot': i, 'mapped_slot': j} for i, j in zip(range(1, 25), range(25, 49))
}
},
'id': {
'r50_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
3: {'orig_slot': 3, 'mapped_slot': 51},
},
'r50b_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
},
'r50bm_nvme_enclosure': {
1: {'orig_slot': 1, 'mapped_slot': 49},
2: {'orig_slot': 2, 'mapped_slot': 50},
3: {'orig_slot': 3, 'mapped_slot': 51},
4: {'orig_slot': 4, 'mapped_slot': 52},
}
}
}
}
}),
('R10', {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
'R10': {
1: {'orig_slot': 1, 'mapped_slot': 1},
5: {'orig_slot': 5, 'mapped_slot': 2},
9: {'orig_slot': 9, 'mapped_slot': 3},
13: {'orig_slot': 13, 'mapped_slot': 4},
2: {'orig_slot': 2, 'mapped_slot': 5},
6: {'orig_slot': 6, 'mapped_slot': 6},
10: {'orig_slot': 10, 'mapped_slot': 7},
14: {'orig_slot': 14, 'mapped_slot': 8},
3: {'orig_slot': 3, 'mapped_slot': 9},
7: {'orig_slot': 7, 'mapped_slot': 10},
11: {'orig_slot': 11, 'mapped_slot': 11},
15: {'orig_slot': 15, 'mapped_slot': 12},
4: {'orig_slot': 4, 'mapped_slot': 13},
8: {'orig_slot': 8, 'mapped_slot': 14},
12: {'orig_slot': 12, 'mapped_slot': 15},
16: {'orig_slot': 16, 'mapped_slot': 16}
}
}
}
}
}),
('R20', {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
'R20': {
3: {'orig_slot': 3, 'mapped_slot': 1},
6: {'orig_slot': 6, 'mapped_slot': 2},
9: {'orig_slot': 9, 'mapped_slot': 3},
12: {'orig_slot': 12, 'mapped_slot': 4},
2: {'orig_slot': 2, 'mapped_slot': 5},
5: {'orig_slot': 5, 'mapped_slot': 6},
8: {'orig_slot': 8, 'mapped_slot': 7},
11: {'orig_slot': 11, 'mapped_slot': 8},
1: {'orig_slot': 1, 'mapped_slot': 9},
4: {'orig_slot': 4, 'mapped_slot': 10},
7: {'orig_slot': 7, 'mapped_slot': 11},
10: {'orig_slot': 10, 'mapped_slot': 12}
}
},
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 13},
2: {'orig_slot': 2, 'mapped_slot': 14}
}
}
}
}
}),
('R20B', {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
'R20B': {
3: {'orig_slot': 3, 'mapped_slot': 1},
6: {'orig_slot': 6, 'mapped_slot': 2},
9: {'orig_slot': 9, 'mapped_slot': 3},
12: {'orig_slot': 12, 'mapped_slot': 4},
2: {'orig_slot': 2, 'mapped_slot': 5},
5: {'orig_slot': 5, 'mapped_slot': 6},
8: {'orig_slot': 8, 'mapped_slot': 7},
11: {'orig_slot': 11, 'mapped_slot': 8},
1: {'orig_slot': 1, 'mapped_slot': 9},
4: {'orig_slot': 4, 'mapped_slot': 10},
7: {'orig_slot': 7, 'mapped_slot': 11},
10: {'orig_slot': 10, 'mapped_slot': 12}
}
},
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 13},
2: {'orig_slot': 2, 'mapped_slot': 14}
}
}
}
}
}),
('R20A', {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
'R20A': {
3: {'orig_slot': 3, 'mapped_slot': 1},
6: {'orig_slot': 6, 'mapped_slot': 2},
9: {'orig_slot': 9, 'mapped_slot': 3},
12: {'orig_slot': 12, 'mapped_slot': 4},
2: {'orig_slot': 2, 'mapped_slot': 5},
5: {'orig_slot': 5, 'mapped_slot': 6},
8: {'orig_slot': 8, 'mapped_slot': 7},
11: {'orig_slot': 11, 'mapped_slot': 8},
1: {'orig_slot': 1, 'mapped_slot': 9},
4: {'orig_slot': 4, 'mapped_slot': 10},
7: {'orig_slot': 7, 'mapped_slot': 11},
10: {'orig_slot': 10, 'mapped_slot': 12}
}
},
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 13},
2: {'orig_slot': 2, 'mapped_slot': 14}
}
}
}
}
}),
('MINI-3.0-E', {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 1},
2: {'orig_slot': 2, 'mapped_slot': 2},
3: {'orig_slot': 3, 'mapped_slot': 3},
4: {'orig_slot': 4, 'mapped_slot': 4},
5: {'orig_slot': 5, 'mapped_slot': 5},
6: {'orig_slot': 6, 'mapped_slot': 6}
}
}
}
}
}),
('MINI-3.0-E+', {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 1},
2: {'orig_slot': 2, 'mapped_slot': 2},
3: {'orig_slot': 3, 'mapped_slot': 3},
4: {'orig_slot': 4, 'mapped_slot': 4},
},
'3000000000000002': {
1: {'orig_slot': 1, 'mapped_slot': 5},
2: {'orig_slot': 2, 'mapped_slot': 6}
}
}
}
}
}),
('MINI-3.0-X', {
'any_version': True,
'versions': {
# TODO: 1.0 "version" has same mapping?? (CORE is the same)
'DEFAULT': {
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 1},
2: {'orig_slot': 2, 'mapped_slot': 2},
3: {'orig_slot': 3, 'mapped_slot': 3},
4: {'orig_slot': 4, 'mapped_slot': 4},
},
'3000000000000002': {
1: {'orig_slot': 1, 'mapped_slot': 5},
2: {'orig_slot': 2, 'mapped_slot': 6},
4: {'orig_slot': 4, 'mapped_slot': 7}
}
}
}
}
}),
('MINI-3.0-X+', {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 1},
2: {'orig_slot': 2, 'mapped_slot': 2},
3: {'orig_slot': 3, 'mapped_slot': 3},
4: {'orig_slot': 4, 'mapped_slot': 4},
5: {'orig_slot': 5, 'mapped_slot': 5},
6: {'orig_slot': 6, 'mapped_slot': 6},
7: {'orig_slot': 7, 'mapped_slot': 7}
}
}
}
}
}),
('MINI-3.0-XL+', {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000002': {
6: {'orig_slot': 6, 'mapped_slot': 1},
},
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 2},
2: {'orig_slot': 2, 'mapped_slot': 3},
3: {'orig_slot': 3, 'mapped_slot': 4},
4: {'orig_slot': 4, 'mapped_slot': 5},
5: {'orig_slot': 5, 'mapped_slot': 6},
6: {'orig_slot': 6, 'mapped_slot': 7},
7: {'orig_slot': 6, 'mapped_slot': 8},
8: {'orig_slot': 6, 'mapped_slot': 9}
}
}
}
}
}),
('MINI-R', {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
1: {'orig_slot': 1, 'mapped_slot': 2},
2: {'orig_slot': 2, 'mapped_slot': 3},
3: {'orig_slot': 3, 'mapped_slot': 4},
4: {'orig_slot': 4, 'mapped_slot': 5},
5: {'orig_slot': 5, 'mapped_slot': 6},
6: {'orig_slot': 6, 'mapped_slot': 7},
7: {'orig_slot': 6, 'mapped_slot': 8}
},
'3000000000000002': {
4: {'orig_slot': 4, 'mapped_slot': 9},
5: {'orig_slot': 5, 'mapped_slot': 10},
6: {'orig_slot': 6, 'mapped_slot': 11},
7: {'orig_slot': 7, 'mapped_slot': 12}
}
}
}
}
}),
('BAD-MODEL', None)
])
def test_slot_mappings(data):
model, expected_result = data
assert slot_mappings.get_slot_info(model) == expected_result
|
e626a9d509c725195f5394a3557103c6032cab19
|
6415c13547e6943f7b65337cbd2790c4e18723c8
|
/netbox/netbox/search/__init__.py
|
6d53e9a97e3215bdb29fbc38bc809faf528f3599
|
[
"Apache-2.0"
] |
permissive
|
netbox-community/netbox
|
287254a9698270d51f57b1297118e9f01536da5a
|
506884bc4dc70299db3e2a7ad577dd7fd808065e
|
refs/heads/develop
| 2023-08-24T09:11:46.685121
| 2023-08-23T18:44:14
| 2023-08-23T18:44:14
| 52,796,596
| 8,122
| 1,817
|
Apache-2.0
| 2023-09-14T18:16:01
| 2016-02-29T14:15:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,862
|
py
|
__init__.py
|
from collections import namedtuple
from django.db import models
from ipam.fields import IPAddressField, IPNetworkField
from netbox.registry import registry
ObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))
class FieldTypes:
FLOAT = 'float'
INTEGER = 'int'
STRING = 'str'
INET = 'inet'
CIDR = 'cidr'
class LookupTypes:
PARTIAL = 'icontains'
EXACT = 'iexact'
STARTSWITH = 'istartswith'
ENDSWITH = 'iendswith'
REGEX = 'iregex'
class SearchIndex:
"""
Base class for building search indexes.
Attributes:
model: The model class for which this index is used.
category: The label of the group under which this indexer is categorized (for form field display). If none,
the name of the model's app will be used.
fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.
"""
model = None
category = None
fields = ()
@staticmethod
def get_field_type(instance, field_name):
"""
Return the data type of the specified model field.
"""
field_cls = instance._meta.get_field(field_name).__class__
if issubclass(field_cls, (models.FloatField, models.DecimalField)):
return FieldTypes.FLOAT
if issubclass(field_cls, IPAddressField):
return FieldTypes.INET
if issubclass(field_cls, IPNetworkField):
return FieldTypes.CIDR
if issubclass(field_cls, models.IntegerField):
return FieldTypes.INTEGER
return FieldTypes.STRING
@staticmethod
def get_field_value(instance, field_name):
"""
Return the value of the specified model field as a string.
"""
return str(getattr(instance, field_name))
@classmethod
def get_category(cls):
return cls.category or cls.model._meta.app_config.verbose_name
@classmethod
def to_cache(cls, instance, custom_fields=None):
"""
Return a list of ObjectFieldValue representing the instance fields to be cached.
Args:
instance: The instance being cached.
custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields
defined for the model will be included. (This can also be provided during bulk caching to avoid looking
up the available custom fields for each instance.)
"""
values = []
# Capture built-in fields
for name, weight in cls.fields:
type_ = cls.get_field_type(instance, name)
value = cls.get_field_value(instance, name)
if type_ and value:
values.append(
ObjectFieldValue(name, type_, weight, value)
)
# Capture custom fields
if getattr(instance, 'custom_field_data', None):
if custom_fields is None:
custom_fields = instance.custom_fields
for cf in custom_fields:
type_ = cf.search_type
value = instance.custom_field_data.get(cf.name)
weight = cf.search_weight
if type_ and value and weight:
values.append(
ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)
)
return values
def get_indexer(model):
"""
Get the SearchIndex class for the given model.
"""
label = f'{model._meta.app_label}.{model._meta.model_name}'
return registry['search'][label]
def register_search(cls):
"""
Decorator for registering a SearchIndex class.
"""
model = cls.model
label = f'{model._meta.app_label}.{model._meta.model_name}'
registry['search'][label] = cls
return cls
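# Hedged usage sketch: ``Site`` and the field weights below are hypothetical
# examples, not definitions from this module.
#
#   @register_search
#   class SiteIndex(SearchIndex):
#       model = Site
#       fields = (
#           ('name', 100),
#           ('description', 500),
#       )
#
# Each two-tuple pairs an indexed field name with its search weight, as
# described in the SearchIndex docstring above.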
|
80d45ce30c37f8fb293a76900abeef5d1009d78c
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/django/middleware/cache.py
|
0fdffe1bbeee48e15fc1d5fcc52cfb4a6c4150e8
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,951
|
py
|
cache.py
|
"""
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with the same response
headers exactly like the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key,
get_max_age,
has_vary_header,
learn_cache_key,
patch_response_headers,
)
from django.utils.deprecation import MiddlewareMixin
class UpdateCacheMiddleware(MiddlewareMixin):
"""
Response-phase cache middleware that updates the cache if the response is
cacheable.
Must be used as part of the two-part update/fetch cache middleware.
UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE
so that it'll get called last during the response phase.
"""
def __init__(self, get_response):
super().__init__(get_response)
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.page_timeout = None
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
@property
def cache(self):
return caches[self.cache_alias]
def _should_update_cache(self, request, response):
return hasattr(request, "_cache_update_cache") and request._cache_update_cache
def process_response(self, request, response):
"""Set the cache, if needed."""
if not self._should_update_cache(request, response):
# We don't need to update the cache, just return.
return response
if response.streaming or response.status_code not in (200, 304):
return response
# Don't cache responses that set a user-specific (and maybe security
# sensitive) cookie in response to a cookie-less request.
if (
not request.COOKIES
and response.cookies
and has_vary_header(response, "Cookie")
):
return response
# Don't cache a response with 'Cache-Control: private'
if "private" in response.get("Cache-Control", ()):
return response
# Page timeout takes precedence over the "max-age" and the default
# cache timeout.
timeout = self.page_timeout
if timeout is None:
# The timeout from the "max-age" section of the "Cache-Control"
# header takes precedence over the default cache timeout.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't cache.
return response
patch_response_headers(response, timeout)
if timeout and response.status_code == 200:
cache_key = learn_cache_key(
request, response, timeout, self.key_prefix, cache=self.cache
)
if hasattr(response, "render") and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(MiddlewareMixin):
"""
Request-phase cache middleware that fetches a page from the cache.
Must be used as part of the two-part update/fetch cache middleware.
FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE
so that it'll get called last during the request phase.
"""
def __init__(self, get_response):
super().__init__(get_response)
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
@property
def cache(self):
return caches[self.cache_alias]
def process_request(self, request):
"""
Check whether the page is already cached and return the cached
version if available.
"""
if request.method not in ("GET", "HEAD"):
request._cache_update_cache = False
return None # Don't bother checking the cache.
# try and get the cached GET response
cache_key = get_cache_key(request, self.key_prefix, "GET", cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key)
# if it wasn't found and we are looking for a HEAD, try looking just for that
if response is None and request.method == "HEAD":
cache_key = get_cache_key(
request, self.key_prefix, "HEAD", cache=self.cache
)
response = self.cache.get(cache_key)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
# hit, return cached response
request._cache_update_cache = False
return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
"""
Cache middleware that provides basic behavior for many simple sites.
Also used as the hook point for the cache decorator, which is generated
using the decorator-from-middleware utility.
"""
def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs):
super().__init__(get_response)
# We need to differentiate between "provided, but using default value",
# and "not provided". If the value is provided using a default, then
# we fall back to system defaults. If it is not provided at all,
# we need to use middleware defaults.
try:
key_prefix = kwargs["key_prefix"]
if key_prefix is None:
key_prefix = ""
self.key_prefix = key_prefix
except KeyError:
pass
try:
cache_alias = kwargs["cache_alias"]
if cache_alias is None:
cache_alias = DEFAULT_CACHE_ALIAS
self.cache_alias = cache_alias
except KeyError:
pass
if cache_timeout is not None:
self.cache_timeout = cache_timeout
self.page_timeout = page_timeout
|
f13bef072077d304e202689c0b4178d744dfeb44
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/vision/mae/pytorch/util/checkpoint.py
|
32abe9f6168a8f307a43073e86c1657d9a579a31
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
checkpoint.py
|
# Copyright (c) 2022 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import os
import sys
def save_checkpoint(epoch, model, optimizer, path):
save_state = {"model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch}
torch.save(save_state, f"{path}/model_{epoch}.pth")
torch.save(save_state, f"{path}/checkpoint.pth")
def load_checkpoint(model, optimizer, path):
    assert os.path.exists(path), f"{path} does not exist"
model_state = torch.load(path)
epoch = model_state["epoch"]
weights = model_state["model"]
optimizer_weights = model_state["optimizer"]
model.load_state_dict(weights)
optimizer.load_state_dict(optimizer_weights)
return epoch
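# Minimal usage sketch; ``model``, ``optimizer`` and the checkpoint directory
# below are assumed to exist and are illustrative only:
#
#   save_checkpoint(epoch=3, model=model, optimizer=optimizer, path="checkpoints")
#   start_epoch = load_checkpoint(model, optimizer, "checkpoints/checkpoint.pth")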
|
bd50ba3da7f86a2465a2133d8099eb2297582c72
|
e5a6076462ebe087cb236bee1bc6cfb45325983c
|
/amadeus/shopping/_activities.py
|
af5e4aa063a228c83844bb37dce855755ae16390
|
[
"MIT"
] |
permissive
|
amadeus4dev/amadeus-python
|
727273113badb2c117397dc9c1fe08b220536d7b
|
84b0c99292e28a8697883274a0137a07a411de6a
|
refs/heads/master
| 2023-06-29T17:26:16.339811
| 2023-06-22T14:45:42
| 2023-06-22T14:45:42
| 126,029,888
| 184
| 81
|
MIT
| 2023-09-14T06:59:18
| 2018-03-20T14:25:13
|
Python
|
UTF-8
|
Python
| false
| false
| 954
|
py
|
_activities.py
|
from amadeus.client.decorator import Decorator
from amadeus.shopping.activities._by_square \
import BySquare
class Activities(Decorator, object):
def __init__(self, client):
Decorator.__init__(self, client)
self.by_square = BySquare(client)
def get(self, **params):
'''
Returns activities for a given location
.. code-block:: python
client.shopping.activities.get(
longitude=2.160873,
latitude=41.397158
)
:param latitude: latitude of geographic location to search around.
For example: ``41.397158``
:param longitude: longitude of geographic location to search around.
For example: ``2.160873``
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get(
'/v1/shopping/activities', **params)
|
924d272402e03943e6cbd52235a4c085c7734176
|
6fe86ea636a69fff9174df6407839f0164407bdb
|
/tt/optimize/tt_min.py
|
0023746c8920746295110b4664a6f33a99f1efe9
|
[
"MIT"
] |
permissive
|
oseledets/ttpy
|
9104e8014a73667b1cfc4fd867593cd8a6097ba0
|
a50d5e0ce2a033a4b1aa703715cb85d715b9b34a
|
refs/heads/master
| 2023-03-06T12:44:43.804115
| 2022-12-14T23:37:57
| 2022-12-14T23:37:57
| 5,499,019
| 220
| 77
|
MIT
| 2022-12-14T23:37:58
| 2012-08-21T18:22:27
|
Python
|
UTF-8
|
Python
| false
| false
| 9,981
|
py
|
tt_min.py
|
"""This module contains a prototype implementation of the
TT-cross-based minimization procedure
"""
from __future__ import print_function, absolute_import, division
from six.moves import xrange
import numpy as np
import math
import tt
from ..maxvol import maxvol
from ..utils.rect_maxvol import rect_maxvol
def reshape(a, sz):
return np.reshape(a, sz, order='F')
def mkron(a, b):
return np.kron(a, b)
def mysvd(a, full_matrices=False):
try:
return np.linalg.svd(a, full_matrices)
except:
return np.linalg.svd(a + np.max(np.abs(a).flatten()) * 1e-14 *
np.random.randn(a.shape[0], a.shape[1]), full_matrices)
def min_func(fun, bounds_min, bounds_max, d=None, rmax=10,
n0=64, nswp=10, verb=True, smooth_fun=None):
"""Find (approximate) minimal value of the function on a d-dimensional grid."""
if d is None:
d = len(bounds_min)
a = np.asanyarray(bounds_min).copy()
b = np.asanyarray(bounds_max).copy()
else:
a = np.ones(d) * bounds_min
b = np.ones(d) * bounds_max
if smooth_fun is None:
smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam))
#smooth_fun = lambda p, lam: np.exp(-10*(p - lam))
# We do not need to store the cores, only the interfaces!
Rx = [[]] * (d + 1) # Python list for the interfaces
Rx[0] = np.ones((1, 1))
Rx[d] = np.ones((1, 1))
Jy = [np.empty(0, dtype=np.int)] * (d + 1)
ry = rmax * np.ones(d + 1, dtype=np.int)
ry[0] = 1
ry[d] = 1
n = n0 * np.ones(d, dtype=np.int)
fun_evals = 0
grid = [np.reshape(np.linspace(a[i], b[i], n[i]), (n[i], 1))
for i in xrange(d)]
for i in xrange(d - 1):
#cr1 = y[i]
ry[i + 1] = min(ry[i + 1], n[i] * ry[i])
cr1 = np.random.randn(ry[i], n[i], ry[i + 1])
cr1 = reshape(cr1, (ry[i] * n[i], ry[i + 1]))
q, r = np.linalg.qr(cr1)
ind = maxvol(q)
w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
# Jy{i+1} = [kron(ones(n(i),1), Jy{i}), kron((1:n(i))', ones(ry(i),1))];
# Jy{i+1} = Jy{i+1}(ind,:);
swp = 0
dirn = -1
i = d - 1
lm = float('Inf')
while swp < nswp:
# Right-to-left sweep
# The idea: compute the current core; compute the function of it;
# Shift locally or globally? Local shift would be the first try
# Compute the current core
if np.size(Jy[i]) == 0:
w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int)
else:
w1 = mkron(np.ones((n[i] * ry[i + 1], 1), dtype=np.int), Jy[i])
w2 = mkron(mkron(np.ones((ry[i + 1], 1), dtype=np.int),
grid[i]), np.ones((ry[i], 1), dtype=np.int))
if np.size(Jy[i + 1]) == 0:
w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int)
else:
w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1), dtype=np.int))
J = np.hstack((w1, w2, w3))
# Just add some random indices to J, which is rnr x d, need to make rn (r + r0) x add,
# i.e., just generate random r, random n and random multiindex
cry = fun(J)
fun_evals += cry.size
cry = reshape(cry, (ry[i], n[i], ry[i + 1]))
min_cur = np.min(cry.flatten("F"))
ind_cur = np.argmin(cry.flatten("F"))
if lm > min_cur:
lm = min_cur
x_full = J[ind_cur, :]
val = fun(x_full)
if verb:
print('New record:', val, 'Point:', x_full, 'fevals:', fun_evals)
cry = smooth_fun(cry, lm)
if (dirn < 0 and i > 0):
cry = reshape(cry, (ry[i], n[i] * ry[i + 1]))
cry = cry.T
#q, r = np.linalg.qr(cry)
u, s, v = mysvd(cry, full_matrices=False)
ry[i] = min(ry[i], rmax)
q = u[:, :ry[i]]
ind = rect_maxvol(q)[0] # maxvol(q)
ry[i] = ind.size
w1 = mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i])
if np.size(Jy[i + 1]) == 0:
w2 = np.zeros((n[i] * ry[i + 1], 0), dtype=np.int)
else:
w2 = mkron(Jy[i + 1], np.ones((n[i], 1), dtype=np.int))
Jy[i] = np.hstack((w1, w2))
Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1))
Jy[i] = Jy[i][ind, :]
if (dirn > 0 and i < d - 1):
cry = reshape(cry, (ry[i] * n[i], ry[i + 1]))
q, r = np.linalg.qr(cry)
#ind = maxvol(q)
ind = rect_maxvol(q)[0]
ry[i + 1] = ind.size
w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
i += dirn
if i == d or i == -1:
dirn = -dirn
i += dirn
swp = swp + 1
return val, x_full
def min_tens(tens, rmax=10, nswp=10, verb=True, smooth_fun=None):
"""Find (approximate) minimal element in a TT-tensor."""
if smooth_fun is None:
smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam))
d = tens.d
Rx = [[]] * (d + 1) # Python list for the interfaces
Rx[0] = np.ones((1, 1))
Rx[d] = np.ones((1, 1))
Jy = [np.empty(0, dtype=np.int)] * (d + 1)
ry = rmax * np.ones(d + 1, dtype=np.int)
ry[0] = 1
ry[d] = 1
n = tens.n
elements_seen = 0
phi_left = [np.empty(0)] * (d + 1)
phi_left[0] = np.array([1])
phi_right = [np.empty(0)] * (d + 1)
phi_right[d] = np.array([1])
cores = tt.tensor.to_list(tens)
# Fill initial multiindex J randomly.
grid = [np.reshape(range(n[i]), (n[i], 1)) for i in xrange(d)]
for i in xrange(d - 1):
ry[i + 1] = min(ry[i + 1], n[i] * ry[i])
ind = sorted(np.random.permutation(ry[i] * n[i])[0:ry[i + 1]])
w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1)
phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1))
phi_left[i + 1] = phi_left[i + 1][ind, :]
swp = 0
dirn = -1
i = d - 1
lm = float('Inf')
while swp < nswp:
# Right-to-left sweep
# The idea: compute the current core; compute the function of it;
# Shift locally or globally? Local shift would be the first try
# Compute the current core
if np.size(Jy[i]) == 0:
w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int)
else:
w1 = mkron(np.ones((n[i] * ry[i + 1], 1), dtype=np.int), Jy[i])
w2 = mkron(mkron(np.ones((ry[i + 1], 1), dtype=np.int),
grid[i]), np.ones((ry[i], 1), dtype=np.int))
if np.size(Jy[i + 1]) == 0:
w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int)
else:
w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1), dtype=np.int))
J = np.hstack((w1, w2, w3))
phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1)
phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1]))
cry = np.tensordot(
phi_left[i], np.tensordot(
cores[i], phi_right[
i + 1], 1), 1)
elements_seen += cry.size
cry = reshape(cry, (ry[i], n[i], ry[i + 1]))
min_cur = np.min(cry.flatten("F"))
ind_cur = np.argmin(cry.flatten("F"))
if lm > min_cur:
lm = min_cur
x_full = J[ind_cur, :]
val = tens[x_full]
if verb:
print('New record:', val, 'Point:', x_full, 'elements seen:', elements_seen)
cry = smooth_fun(cry, lm)
if dirn < 0 and i > 0:
cry = reshape(cry, (ry[i], n[i] * ry[i + 1]))
cry = cry.T
#q, r = np.linalg.qr(cry)
u, s, v = mysvd(cry, full_matrices=False)
ry[i] = min(ry[i], rmax)
q = u[:, :ry[i]]
ind = rect_maxvol(q)[0] # maxvol(q)
ry[i] = ind.size
w1 = mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i])
if np.size(Jy[i + 1]) == 0:
w2 = np.zeros((n[i] * ry[i + 1], 0), dtype=np.int)
else:
w2 = mkron(Jy[i + 1], np.ones((n[i], 1), dtype=np.int))
Jy[i] = np.hstack((w1, w2))
Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1))
Jy[i] = Jy[i][ind, :]
phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1)
phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1]))
phi_right[i] = phi_right[i][:, ind]
if dirn > 0 and i < d - 1:
cry = reshape(cry, (ry[i] * n[i], ry[i + 1]))
q, r = np.linalg.qr(cry)
#ind = maxvol(q)
ind = rect_maxvol(q)[0]
ry[i + 1] = ind.size
phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1)
phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1))
phi_left[i + 1] = phi_left[i + 1][ind, :]
w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
i += dirn
if i == d or i == -1:
dirn = -dirn
i += dirn
swp = swp + 1
return val, x_full
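# Hedged usage sketch for min_func above: the quadratic objective is an
# illustrative assumption, not part of this module. ``fun`` receives an array
# of grid points (one point per row) and must return one value per row.
#
#   f = lambda X: np.sum((np.asarray(X) - 0.3) ** 2, axis=-1)
#   val, x_min = min_func(f, bounds_min=[-1.0, -1.0], bounds_max=[1.0, 1.0])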
|
e1c9ab797ad51f82294f631fb0d6f9688d64f4bf
|
d068d41e02ab116cbd83ee9298c9ba357c668f85
|
/setup.py
|
a4fe1081c3744a750de6d049d91af0f0bbfc40b2
|
[
"BSD-3-Clause"
] |
permissive
|
chrisjsewell/ipypublish
|
01f362cdf0989e119111a089bb307f52e23c1ef7
|
53fa92c4c7f18e36d8a9790b10de27219882f4e4
|
refs/heads/develop
| 2022-02-08T04:26:32.081511
| 2020-08-14T01:18:09
| 2020-08-14T01:18:09
| 96,322,423
| 233
| 42
|
BSD-3-Clause
| 2021-11-20T18:58:33
| 2017-07-05T13:29:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,276
|
py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup for ipypublish."""
import io
from importlib import import_module
from setuptools import setup, find_packages
with open("requirements.txt") as f:
requirements = f.read().splitlines()
with io.open("README.md") as readme:
readme_str = readme.read()
setup(
name="ipypublish",
version=import_module("ipypublish").__version__,
description=(
"A workflow for creating and editing publication ready "
"scientific reports, from one or more Jupyter Notebooks"
),
long_description=readme_str,
long_description_content_type="text/markdown",
install_requires=requirements,
extras_require={
"sphinx": {"sphinx>=1.8", "sphinxcontrib-bibtex"},
"tests": {
"pytest>=3.6",
"pytest-regressions",
"pytest-cov",
"coverage",
"pillow",
"nbsphinx>=0.5,<0.6",
"ipykernel",
"sphinx>=1.6,<3",
"sphinxcontrib-bibtex",
"texsoup<=0.1.4",
},
"code_style": [
"black==19.3b0",
"pre-commit==1.17.0",
"flake8<3.8.0,>=3.7.0",
"doc8<0.9.0,>=0.8.0",
"pygments", # required by doc8
],
"science": {"matplotlib", "numpy", "pandas", "sympy"},
"rtd": {
"recommonmark>=0.5",
"pytest>=4.4",
"pillow",
"numpy",
"matplotlib",
"pandas",
"sympy<1.3",
"sphinx>=1.8",
"sphinxcontrib-bibtex",
"ipykernel",
"ipywidgets>=7.5,<8",
},
},
license="MIT",
author="Chris Sewell",
author_email="chrisj_sewell@hotmail.com",
url="https://github.com/chrisjsewell/ipypublish",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"Intended Audience :: Financial and Insurance Industry",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
"Framework :: Sphinx :: Extension",
],
keywords="python, jupyter-notebook, nbconvert, pandoc, latex, pdf",
zip_safe=True,
packages=find_packages(),
include_package_data=True,
entry_points={
"console_scripts": [
"nbpublish = ipypublish.frontend.nbpublish:run",
"nbpresent = ipypublish.frontend.nbpresent:run",
"ipubpandoc = ipypublish.filters_pandoc.main:pandoc_filters",
],
"ipypublish.postprocessors": [
"remove-blank-lines = ipypublish.postprocessors.stream_modify:RemoveBlankLines",
"remove-trailing-space = ipypublish.postprocessors.stream_modify:RemoveTrailingSpace",
"filter-output-files = ipypublish.postprocessors.stream_modify:FilterOutputFiles",
"fix-slide-refs = ipypublish.postprocessors.stream_modify:FixSlideReferences",
"pdf-export = ipypublish.postprocessors.pdfexport:PDFExport",
"write-stream = ipypublish.postprocessors.to_stream:WriteStream",
"write-text-file = ipypublish.postprocessors.file_actions:WriteTextFile",
"remove-folder = ipypublish.postprocessors.file_actions:RemoveFolder",
"write-resource-files = ipypublish.postprocessors.file_actions:WriteResourceFiles",
"copy-resource-paths = ipypublish.postprocessors.file_actions:CopyResourcePaths",
"reveal-server = ipypublish.postprocessors.reveal_serve:RevealServer",
"run-sphinx = ipypublish.postprocessors.sphinx:RunSphinx [sphinx]",
"convert-bibgloss = ipypublish.postprocessors.convert_bibgloss:ConvertBibGloss",
],
},
)
|
61e96dfbc5a2456197348b15bb49fe567b5ba725
|
7a9beade653ebec41c8b6751057f97b199daddd6
|
/tests/convolve/test_generic_separable_filters.py
|
41f4c92f677287f2a3f0093155e6fd53052fa2e8
|
[
"BSD-3-Clause"
] |
permissive
|
maweigert/gputools
|
4939bdbf0ecd4891f79827203b565fcf4b2b0ecf
|
4ca3b013879c18cf8d4c8b1d91b5681a3287616c
|
refs/heads/master
| 2023-01-24T09:24:03.924781
| 2021-12-03T17:18:20
| 2021-12-03T17:18:20
| 39,986,100
| 101
| 18
|
BSD-3-Clause
| 2021-12-02T23:31:10
| 2015-07-31T04:11:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,503
|
py
|
test_generic_separable_filters.py
|
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
import numpy.testing as npt
from itertools import product, combinations_with_replacement
import scipy.ndimage.filters as spf
import gputools
from gputools.convolve import max_filter, min_filter, uniform_filter
np.random.seed(0)
def _test_single(filt1, filt2, dshape, size , cval = 0., dtype = np.float32, strides=(1,1), skip_assert = False):
d = np.random.randint(0,200, dshape).astype(dtype)
ss_stride = tuple(slice(0,None,s) for s in strides)
out1 = filt1(d, size, strides=strides, cval=cval)
out2 = filt2(d, size, mode = "constant", cval = cval)[ss_stride]
out1,out2 = out1/200 ,out2/200
print(("shape: %s \tsize: %s\t cval: %.2f\t dtype: %s\t stride: %s \tdifference: %s" % (dshape, size, cval, dtype, strides, np.amax(np.abs(1.*out1 - out2)))))
if not skip_assert:
npt.assert_almost_equal(out1,out2, decimal = 1)
else:
print(np.allclose(out1,out2, rtol=1e-1))
return d, out1, out2
def _test_some(filt1, filt2, cval = 0.):
stridess = {2:((1,1),(2,2),(4,3)), 3:((1,1,1),(2,2,2),(4,1,1),(3,2,5))}
for ndim in [2,3]:
for dshape in combinations_with_replacement([32,44,53],ndim):
for size in [3,7,13]:
for dtype in (np.uint8, np.uint16, np.float32):
for strides in stridess[ndim]:
_test_single(filt1, filt2, dshape,size, cval = cval, strides=strides, dtype = dtype)
def test_all():
print("~"*40, " maximum filter")
_test_some(max_filter, spf.maximum_filter, cval = -np.inf)
print("~" * 40, " minimum filter")
_test_some(min_filter, spf.minimum_filter, cval = np.inf)
print("~" * 40, " uniform filter")
_test_some(uniform_filter, spf.uniform_filter, cval = 0.)
if __name__ == '__main__':
# _test_some(uniform_filter, spf.uniform_filter, cval = 0.)
# _test_some(max_filter, spf.maximum_filter, cval = -np.inf)
# _test_some(min_filter, spf.minimum_filter, cval=np.inf)
# test_all()
np.random.seed(27)
# x, a,b = _test_single(uniform_filter, spf.uniform_filter, (32,32), 3, strides=(1,1), dtype=np.uint8, cval = 0, skip_assert=True)
x, a,b = _test_single(uniform_filter, spf.uniform_filter, (4,4), 2, strides=(1,1), dtype=np.uint8, cval = 0, skip_assert=True)
# x = np.zeros((8,8), np.uint8)
# x[4,4] = 8
# x[4,5] = 8
# u1 = uniform_filter(x,3)
# u2 = spf.uniform_filter(x,3)
|
e4796add4e975333676583037bb17695f3a98271
|
61a148d684047323f866017c6c95e0dc78682c43
|
/core/amber/src/main/python/pytexera/udf/examples/count_batch_operator.py
|
44346be136bb746795b570abc4601b10e6407409
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
Texera/texera
|
9dd92dd0999fd78ff37cb6241f3395d475549e27
|
ca554ecad8e161b489aa17bdb17c9249ef888b6d
|
refs/heads/master
| 2023-09-03T21:46:42.147647
| 2023-08-31T21:42:30
| 2023-08-31T21:42:30
| 53,976,910
| 129
| 61
|
Apache-2.0
| 2023-09-14T15:53:52
| 2016-03-15T20:38:46
|
Scala
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
count_batch_operator.py
|
from pytexera import *
class CountBatchOperator(UDFBatchOperator):
BATCH_SIZE = 10
def __init__(self):
super().__init__()
self.count = 0
@overrides
def process_batch(self, batch: Batch, port: int) -> Iterator[Optional[BatchLike]]:
self.count += 1
yield batch
|
9ca274533b42ddf70953d4e8898f686e3308754a
|
fbdc48c28e54fb33ae4842ef95ff63893902c99a
|
/scripts/examples/09-OpenMV-Boards/01-WiFi-Shield/http_client.py
|
559ce1dc9c2e45dfccff8877b22b77f7563aaa6e
|
[
"MIT"
] |
permissive
|
openmv/openmv
|
44d4b79fc8693950a2e330e5e0fd95b5c36e230f
|
8a90e070a88b7fc14c87a00351b9c4a213278419
|
refs/heads/master
| 2023-08-30T20:59:57.227603
| 2023-08-23T16:50:55
| 2023-08-23T16:50:55
| 14,360,940
| 2,150
| 1,226
|
MIT
| 2023-09-14T07:18:15
| 2013-11-13T10:23:44
|
C
|
UTF-8
|
Python
| false
| false
| 791
|
py
|
http_client.py
|
# Simple HTTP client example.
import network
import usocket
# AP info
SSID = "" # Network SSID
KEY = "" # Network key
PORT = 80
HOST = "www.google.com"
# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
# We should have a valid IP now via DHCP
print(wlan.ifconfig())
# Get addr info via DNS
addr = usocket.getaddrinfo(HOST, PORT)[0][4]
print(addr)
# Create a new socket and connect to addr
client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
client.connect(addr)
# Set timeout
client.settimeout(3.0)
# Send HTTP request and recv response
client.send("GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % (HOST))
print(client.recv(1024))
# Close socket
client.close()
|
b38997fb4ee5d3f269fc25012a9192ae7211909a
|
ee87c715e5d937b0380ddb87d56e9ebc4877a02b
|
/benchmarks/bench_hist_gradient_boosting_higgsboson.py
|
65be02ec0c4b926d6d38ab69e385b060968d0942
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-learn/scikit-learn
|
27a2196f3173e0f32f7a5c5d652b70a6c57c7644
|
061f8777b48e5491b0c57bb8e0bc7067c103079d
|
refs/heads/main
| 2023-08-18T15:32:59.764468
| 2023-08-18T14:39:08
| 2023-08-18T14:39:08
| 843,222
| 58,456
| 29,777
|
BSD-3-Clause
| 2023-09-14T19:08:34
| 2010-08-17T09:43:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,120
|
py
|
bench_hist_gradient_boosting_higgsboson.py
|
import argparse
import os
from gzip import GzipFile
from time import time
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
from joblib import Memory
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=10)
parser.add_argument("--lightgbm", action="store_true", default=False)
parser.add_argument("--xgboost", action="store_true", default=False)
parser.add_argument("--catboost", action="store_true", default=False)
parser.add_argument("--learning-rate", type=float, default=1.0)
parser.add_argument("--subsample", type=int, default=None)
parser.add_argument("--max-bins", type=int, default=255)
parser.add_argument("--no-predict", action="store_true", default=False)
parser.add_argument("--cache-loc", type=str, default="/tmp")
parser.add_argument("--no-interactions", type=bool, default=False)
args = parser.parse_args()
HERE = os.path.dirname(__file__)
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"
m = Memory(location=args.cache_loc, mmap_mode="r")
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
subsample = args.subsample
lr = args.learning_rate
max_bins = args.max_bins
@m.cache
def load_data():
filename = os.path.join(HERE, URL.rsplit("/", 1)[-1])
if not os.path.exists(filename):
print(f"Downloading {URL} to {filename} (2.6 GB)...")
urlretrieve(URL, filename)
print("done.")
print(f"Parsing {filename}...")
tic = time()
with GzipFile(filename) as f:
df = pd.read_csv(f, header=None, dtype=np.float32)
toc = time()
print(f"Loaded {df.values.nbytes / 1e9:0.3f} GB in {toc - tic:0.3f}s")
return df
def fit(est, data_train, target_train, libname):
print(f"Fitting a {libname} model...")
tic = time()
est.fit(data_train, target_train)
toc = time()
print(f"fitted in {toc - tic:.3f}s")
def predict(est, data_test, target_test):
if args.no_predict:
return
tic = time()
predicted_test = est.predict(data_test)
predicted_proba_test = est.predict_proba(data_test)
toc = time()
roc_auc = roc_auc_score(target_test, predicted_proba_test[:, 1])
acc = accuracy_score(target_test, predicted_test)
print(f"predicted in {toc - tic:.3f}s, ROC AUC: {roc_auc:.4f}, ACC: {acc :.4f}")
df = load_data()
target = df.values[:, 0]
data = np.ascontiguousarray(df.values[:, 1:])
data_train, data_test, target_train, target_test = train_test_split(
data, target, test_size=0.2, random_state=0
)
n_classes = len(np.unique(target))
if subsample is not None:
data_train, target_train = data_train[:subsample], target_train[:subsample]
n_samples, n_features = data_train.shape
print(f"Training set with {n_samples} records with {n_features} features.")
if args.no_interactions:
interaction_cst = [[i] for i in range(n_features)]
else:
interaction_cst = None
est = HistGradientBoostingClassifier(
loss="log_loss",
learning_rate=lr,
max_iter=n_trees,
max_bins=max_bins,
max_leaf_nodes=n_leaf_nodes,
early_stopping=False,
random_state=0,
verbose=1,
interaction_cst=interaction_cst,
)
fit(est, data_train, target_train, "sklearn")
predict(est, data_test, target_test)
if args.lightgbm:
est = get_equivalent_estimator(est, lib="lightgbm", n_classes=n_classes)
fit(est, data_train, target_train, "lightgbm")
predict(est, data_test, target_test)
if args.xgboost:
est = get_equivalent_estimator(est, lib="xgboost", n_classes=n_classes)
fit(est, data_train, target_train, "xgboost")
predict(est, data_test, target_test)
if args.catboost:
est = get_equivalent_estimator(est, lib="catboost", n_classes=n_classes)
fit(est, data_train, target_train, "catboost")
predict(est, data_test, target_test)
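# Illustrative invocation (not part of the benchmark script itself). The flag
# values below are arbitrary examples, but every flag shown is defined by the
# argparse options above:
#
#   python bench_hist_gradient_boosting_higgsboson.py --n-trees 100 \
#       --learning-rate 0.1 --max-bins 255 --lightgbm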
|
c168369dccf26f89304f9c2561d90c12bc0979c7
|
2f679ea4787bcd765dc9d3025a03f15a25d360cb
|
/docker/lr-pb/merge_ccs_reports.py
|
003795e7915c7a975590ccdad49f4f23b324a483
|
[
"BSD-3-Clause"
] |
permissive
|
broadinstitute/long-read-pipelines
|
f7d0958c23b68c4143d350c0b77b62d0bbea914e
|
9620d58f49f29dd2f27fa5f30f72c8257aa2064b
|
refs/heads/main
| 2023-08-31T20:46:00.456332
| 2023-08-15T18:36:14
| 2023-08-15T18:36:14
| 186,657,809
| 101
| 23
|
BSD-3-Clause
| 2023-09-06T14:39:55
| 2019-05-14T16:12:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
merge_ccs_reports.py
|
import argparse
import re
parser = argparse.ArgumentParser(description='Merge CCS reports.')
parser.add_argument('ccs_report', metavar='R', type=str, nargs='+', help='CCS report(s)')
args = parser.parse_args()
d = {}
for ccs_report in args.ccs_report:
file = open(ccs_report, "r")
for line in file:
if len(line) > 1 and ':' in line:
a = line.rstrip().split(":")
k = a[0].rstrip()
            v = float(re.sub(" ", "", re.sub(r" \(.*$", "", a[1])))
            if k not in d:
                d[k] = 0.0
d[k] = d[k] + v
print(f'ZMWs input : {d["ZMWs input"]}')
print(f'')
print(f'ZMWs pass filters : {d["ZMWs pass filters"]} ({(100.0*d["ZMWs pass filters"]/d["ZMWs input"]):.2f}%)')
print(f'ZMWs fail filters : {d["ZMWs fail filters"]} ({(100.0*d["ZMWs fail filters"]/d["ZMWs input"]):.2f}%)')
print(f'ZMWs shortcut filters : {d["ZMWs shortcut filters"]} ({(100.0*d["ZMWs shortcut filters"]/d["ZMWs input"]):.2f}%)')
print(f'')
print(f'ZMWs with tandem repeats : {d["ZMWs with tandem repeats"]} ({(100.0*d["ZMWs with tandem repeats"]/d["ZMWs input"]):.2f}%)')
print(f'')
print(f'Exclusive counts for ZMWs failing filters:')
print(f'Below SNR threshold : {d["Below SNR threshold"]} ({(100.0*d["Below SNR threshold"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Median length filter : {d["Median length filter"]} ({(100.0*d["Median length filter"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Lacking full passes : {d["Lacking full passes"]} ({(100.0*d["Lacking full passes"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Heteroduplex insertions : {d["Heteroduplex insertions"]} ({(100.0*d["Heteroduplex insertions"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Coverage drops : {d["Coverage drops"]} ({(100.0*d["Coverage drops"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Insufficient draft cov : {d["Insufficient draft cov"]} ({(100.0*d["Insufficient draft cov"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Draft too different : {d["Draft too different"]} ({(100.0*d["Draft too different"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Draft generation error : {d["Draft generation error"]} ({(100.0*d["Draft generation error"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Draft above --max-length : {d["Draft above --max-length"]} ({(100.0*d["Draft above --max-length"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Draft below --min-length : {d["Draft below --min-length"]} ({(100.0*d["Draft below --min-length"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Reads failed polishing : {d["Reads failed polishing"]} ({(100.0*d["Reads failed polishing"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Empty coverage windows : {d["Empty coverage windows"]} ({(100.0*d["Empty coverage windows"]/d["ZMWs fail filters"]):.2f}%)')
print(f'CCS did not converge : {d["CCS did not converge"]} ({(100.0*d["CCS did not converge"]/d["ZMWs fail filters"]):.2f}%)')
print(f'CCS below minimum RQ : {d["CCS below minimum RQ"]} ({(100.0*d["CCS below minimum RQ"]/d["ZMWs fail filters"]):.2f}%)')
print(f'Unknown error : {d["Unknown error"]} ({(100.0*d["Unknown error"]/d["ZMWs fail filters"]):.2f}%)')
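# Illustrative sketch of the input this script expects (the numbers are made up).
# Each CCS report is a plain-text file of "key : value (percentage)" lines, e.g.
#
#   ZMWs input : 93752
#   ZMWs pass filters : 81254 (86.67%)
#   ZMWs fail filters : 12498 (13.33%)
#
# The parser keeps the text left of ":", strips any trailing "(...)" from the
# value, and sums the numbers across every report given on the command line:
#
#   python merge_ccs_reports.py shard1.ccs_report.txt shard2.ccs_report.txt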
|
62f8aa326ac110d70ccaaee7ef6802928a87ac2f
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/tests/bugs/test-200907231705.py
|
425d8f56bb1358c1d8de7803a1dcc1d324d79a1e
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756
| 2023-04-29T20:38:15
| 2023-04-29T20:45:13
| 20,547,850
| 130
| 63
|
Apache-2.0
| 2021-08-19T16:52:18
| 2014-06-06T01:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,068
|
py
|
test-200907231705.py
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
from pyxb.utils import six
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:complexType name="tEmpty">
<xs:attribute name="units" type="xs:string" use="required"/>
</xs:complexType>
<xs:element name="Empty" type="tEmpty"/>
<xs:complexType name="tMixed" mixed="true">
<xs:attribute name="units" type="xs:string" use="required"/>
</xs:complexType>
<xs:element name="Mixed" type="tMixed"/>
<xs:complexType name="tSimple">
<xs:simpleContent>
<xs:extension base="xs:double">
<xs:attribute name="units" type="xs:string" use="required"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="Simple" type="tSimple" nillable="true"/>
<xs:element name="Something"/>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac_200907231705 (unittest.TestCase):
def testParsing (self):
xml = '<Empty units="m"/>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_EMPTY, instance._ContentTypeTag)
self.assertTrue(instance.validateBinding())
xml = '<Empty units="m">5</Empty>'
self.assertRaises(pyxb.MixedContentError, CreateFromDocument, xml)
xml = '<Mixed units="m"/>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_MIXED, instance._ContentTypeTag)
xml = '<Mixed units="m">5</Mixed>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_MIXED, instance._ContentTypeTag)
self.assertEqual(six.u('5'), instance.orderedContent()[0].value)
xml = '<Mixed units="m">5<Something/>4</Mixed>'
self.assertRaises(pyxb.UnrecognizedContentError, CreateFromDocument, xml)
xml = '<Simple units="m"/>'
self.assertRaises(pyxb.SimpleContentAbsentError, CreateFromDocument, xml)
def testCtorEmpty (self):
instance = Empty()
self.assertRaises(pyxb.AttributeValidationError, instance.validateBinding)
instance = Empty(units='m')
self.assertTrue(instance.validateBinding())
self.assertRaises(pyxb.MixedContentError, Empty, 4, units='m')
def testCtorMixed (self):
instance = Mixed()
self.assertRaises(pyxb.AttributeValidationError, instance.validateBinding)
instance = Mixed(units='m')
self.assertTrue(instance.validateBinding())
instance = Mixed(4, units='m')
self.assertTrue(instance.validateBinding())
self.assertEqual(six.u('4'), instance.orderedContent()[0].value)
instance = Mixed(xs.int(4), units='m')
self.assertTrue(instance.validateBinding())
self.assertEqual(six.u('4'), instance.orderedContent()[0].value)
def testCtorSimple (self):
self.assertRaises(pyxb.SimpleContentAbsentError, Simple)
instance = Simple(4)
self.assertRaises(pyxb.AttributeValidationError, instance.validateBinding)
self.assertRaises(pyxb.SimpleContentAbsentError, Simple, units='m')
instance = Simple(4.5, units='m')
self.assertEqual(4.5, instance.value())
def testParsingNil (self):
xml = '<Simple xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true" units="m"/>'
instance = CreateFromDocument(xml)
self.assertEqual(pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE, instance._ContentTypeTag)
self.assertTrue(instance.validateBinding())
self.assertTrue(instance.value() is None)
if __name__ == '__main__':
unittest.main()
|
2dfe397dfd68c140de73b1be941127d0dc7d7df9
|
753aafa747871f556600b28dbb867298132b1e6b
|
/supervisely/app/widgets/task_logs/task_logs.py
|
694bb136baa3267447ab3e97ce3a95abac0f0e6f
|
[
"Apache-2.0"
] |
permissive
|
supervisely/supervisely
|
85dd63e5ccb590b2861271ef7bd5401aa2a99038
|
f0df756b8fb89364202fde54e6ef5fe89fca089d
|
refs/heads/master
| 2023-08-27T07:29:57.682377
| 2023-08-24T13:17:31
| 2023-08-24T13:17:31
| 140,302,908
| 447
| 91
|
Apache-2.0
| 2023-09-13T11:11:09
| 2018-07-09T15:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
task_logs.py
|
from typing import Dict, Union
from supervisely.app import DataJson
from supervisely.app.widgets import Widget
from supervisely import is_development
class TaskLogs(Widget):
def __init__(
self,
task_id: int = None,
widget_id: str = None,
):
self._task_id = task_id
self._is_development = is_development()
super().__init__(widget_id=widget_id, file_path=__file__)
def get_json_data(self) -> Dict:
return {"taskId": self._task_id}
def get_json_state(self) -> Dict:
return {}
def get_task_id(self) -> int:
return DataJson()[self.widget_id]["taskId"]
def _set_task_id(self, task_id: Union[int, None]):
self._task_id = task_id
DataJson()[self.widget_id]["taskId"] = self._task_id
DataJson().send_changes()
def set_task_id(self, task_id: int):
self._set_task_id(None)
if type(task_id) != int:
raise TypeError(f"task_id must be int, but {type(task_id)} was given")
self._set_task_id(task_id)
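# Minimal usage sketch (illustrative only; the surrounding Supervisely app wiring
# is omitted and the task ids are made-up values):
#
#   logs = TaskLogs(task_id=12345)
#   logs.set_task_id(67890)     # non-int values raise TypeError
#   current = logs.get_task_id()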
|
2969b9db57431e1aad0714edc2d75f5f689126d6
|
7bc1d8634529eac952490399fb71f10bcedf05cc
|
/tests/scripts/thread-cert/pktverify/bytes.py
|
c64271739f89e041c973d4356f44ca7c72cd3ebb
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] |
permissive
|
openthread/openthread
|
6a9e25d1cd224bde9796d9616f04f423dba27d77
|
102a631cb3f8938389d0d10199a14c59184039cd
|
refs/heads/main
| 2023-08-18T10:46:03.820124
| 2023-08-17T22:20:55
| 2023-08-17T22:20:55
| 55,808,787
| 3,485
| 1,296
|
BSD-3-Clause
| 2023-09-14T15:50:53
| 2016-04-08T20:47:41
|
C++
|
UTF-8
|
Python
| false
| false
| 5,686
|
py
|
bytes.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
from typing import Union, Any
class Bytes(bytearray):
"""Bytes represents a byte array which is able to handle strings of flexible formats"""
def __init__(self, s: Union[str, bytearray, 'Bytes', Any]):
if isinstance(s, str):
try:
s = Bytes._parse_compact(s)
except ValueError:
try:
s = Bytes._parse_octets(s)
except ValueError:
s = Bytes._parse_hextets(s)
super().__init__(s)
def __hash__(self):
return hash(bytes(self))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.format_compact())
def format_compact(self) -> str:
"""
Converts the Bytes to a compact string (without ":").
"""
return ''.join('%02x' % b for b in self)
def format_octets(self) -> str:
"""
Converts the Bytes to a string of octets separated by ":".
"""
return ':'.join('%02x' % b for b in self)
def format_hextets(self) -> str:
"""
Converts the Bytes to a string of hextets separated by ":"
"""
assert len(self) % 2 == 0, self.format_octets()
return ':'.join('%04x' % (self[i] * 256 + self[i + 1]) for i in range(0, len(self), 2))
__str__ = format_octets
@staticmethod
def _parse_compact(s: str) -> bytearray:
try:
assert len(s) % 2 == 0
return bytearray(int(s[i:i + 2], 16) for i in range(0, len(s), 2))
except Exception:
raise ValueError(s)
@staticmethod
def _parse_octets(s: str) -> bytearray:
try:
assert len(s) % 3 == 2 or not s
if not s:
return bytearray(b"")
return bytearray(int(x, 16) for x in s.split(':'))
except Exception:
raise ValueError(s)
@staticmethod
def _parse_hextets(s) -> bytearray:
try:
assert len(s) % 5 == 4 or not s
if not s:
return bytearray(b"")
return bytearray(int(x[i:i + 2], 16) for x in s.split(':') for i in (0, 2))
except Exception:
raise ValueError(s)
def __getitem__(self, item) -> Union['Bytes', int]:
"""
Get self[item].
:param item: index or slice to retrieve
:return: the byte value at specified index or sub `Bytes` if item is slice
"""
x = super().__getitem__(item)
if isinstance(x, bytearray):
return Bytes(x)
else:
return x
def __eq__(self, other: Union[str, 'Bytes']):
"""
Check if bytes is equal to other.
"""
if other is None:
return False
elif not isinstance(other, Bytes):
other = self.__class__(other)
eq = super().__eq__(other)
print("[%r %s %r]" % (self, "==" if eq else "!=", other), file=sys.stderr)
return eq
if __name__ == '__main__':
# some simple tests
x = Bytes(b"\x01\x02\x03\x04")
assert eval(repr(x)) == x, repr(x) # representation of Bytes should be able to be evaluated back
assert x == str(x), (x, str(x))
assert x.format_compact() == "01020304", x.format_compact()
assert x.format_octets() == "01:02:03:04", x.format_octets()
assert x.format_hextets() == "0102:0304", x.format_hextets()
assert Bytes._parse_compact("") == Bytes(b"")
assert Bytes._parse_compact('01020304') == x
assert Bytes._parse_octets("") == Bytes(b"")
assert Bytes._parse_octets('01:02:03:04') == x
assert Bytes._parse_hextets("") == Bytes(b"")
assert Bytes._parse_hextets('0102:0304') == x
assert isinstance(x[:2], Bytes)
assert isinstance(x[-2:], Bytes)
assert x[:2] == Bytes(b'\x01\x02')
assert x[-2:] == Bytes(b'\x03\x04')
# should also parse string formats
assert Bytes("01020304") == Bytes(b"\x01\x02\x03\x04")
assert Bytes("01:02:03:04") == Bytes(b"\x01\x02\x03\x04")
assert Bytes("0102:0304") == Bytes(b"\x01\x02\x03\x04")
|
e2d3aa811bdfb65e0533463e35e50ddbabfbedd5
|
1adebf72de7aa7147b1148ba35280645fbe5bbd3
|
/dev/etc/pending_ugens/GrainSin.py
|
e11203626ae24d2fe1be587c5288f0f4148729ff
|
[
"MIT"
] |
permissive
|
josiah-wolf-oberholtzer/supriya
|
d0c4f921a06e3f9df40f91a226a1c038d3ef84d5
|
2ebf835ce9bbfca19e4220628a32c30fa66e04f7
|
refs/heads/main
| 2023-07-20T00:06:23.955530
| 2023-07-18T03:02:14
| 2023-07-18T03:02:14
| 17,463,359
| 227
| 28
|
MIT
| 2023-07-18T03:02:15
| 2014-03-06T02:27:25
|
Python
|
UTF-8
|
Python
| false
| false
| 6,759
|
py
|
GrainSin.py
|
import collections
from supriya.enums import CalculationRate
from supriya.synthdefs import MultiOutUGen
class GrainSin(MultiOutUGen):
"""
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin
GrainSin.ar()
"""
### CLASS VARIABLES ###
    # Ordered collection of input names; the properties below resolve each
    # input positionally via .index(), so a plain tuple is used here.
    _ordered_input_names = (
        'channel_count',
        'trigger',
        'duration',
        'frequency',
        'pan',
        'envbufnum',
        'max_grains',
    )
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
channel_count=1,
duration=1,
envbufnum=-1,
frequency=440,
max_grains=512,
pan=0,
trigger=0,
):
MultiOutUGen.__init__(
self,
calculation_rate=calculation_rate,
channel_count=channel_count,
duration=duration,
envbufnum=envbufnum,
frequency=frequency,
max_grains=max_grains,
pan=pan,
trigger=trigger,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
channel_count=1,
duration=1,
envbufnum=-1,
frequency=440,
max_grains=512,
pan=0,
trigger=0,
):
"""
Constructs an audio-rate GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin
GrainSin.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
channel_count=channel_count,
duration=duration,
envbufnum=envbufnum,
frequency=frequency,
max_grains=max_grains,
pan=pan,
trigger=trigger,
)
return ugen
# def newFromDesc(): ...
### PUBLIC PROPERTIES ###
@property
def channel_count(self):
"""
Gets `channel_count` input of GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin.channel_count
1.0
Returns ugen input.
"""
index = self._ordered_input_names.index('channel_count')
return self._inputs[index]
@property
def duration(self):
"""
Gets `duration` input of GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin.duration
1.0
Returns ugen input.
"""
index = self._ordered_input_names.index('duration')
return self._inputs[index]
@property
def envbufnum(self):
"""
Gets `envbufnum` input of GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin.envbufnum
-1.0
Returns ugen input.
"""
index = self._ordered_input_names.index('envbufnum')
return self._inputs[index]
@property
def frequency(self):
"""
Gets `frequency` input of GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin.frequency
440.0
Returns ugen input.
"""
index = self._ordered_input_names.index('frequency')
return self._inputs[index]
@property
def max_grains(self):
"""
Gets `max_grains` input of GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin.max_grains
512.0
Returns ugen input.
"""
index = self._ordered_input_names.index('max_grains')
return self._inputs[index]
@property
def pan(self):
"""
Gets `pan` input of GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin.pan
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('pan')
return self._inputs[index]
@property
def trigger(self):
"""
Gets `trigger` input of GrainSin.
::
>>> grain_sin = supriya.ugens.GrainSin.ar(
... channel_count=1,
... duration=1,
... envbufnum=-1,
... frequency=440,
... max_grains=512,
... pan=0,
... trigger=0,
... )
>>> grain_sin.trigger
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('trigger')
return self._inputs[index]
|
b85e53acbe16f66a08e82620706f746da9f1c878
|
307821fb88d61d487943b30a229a849c7a362d71
|
/hydro_cli/hydro/__init__.py
|
1b983f5101802fd7601816640f62ab7b25e461a7
|
[
"Apache-2.0"
] |
permissive
|
hydro-project/hydroflow
|
5a216904a9bf8d8f3d875682267d0edf843b9fb6
|
3f45ec10f0bcc5484f3e116369cdf66ec53b506f
|
refs/heads/main
| 2023-08-27T00:57:19.959242
| 2023-08-25T17:42:33
| 2023-08-25T17:42:33
| 399,648,216
| 329
| 82
|
Apache-2.0
| 2023-09-14T13:05:14
| 2021-08-25T01:05:30
|
Rust
|
UTF-8
|
Python
| false
| false
| 26
|
py
|
__init__.py
|
from hydro._core import *
|
5832d30fc84a38e439a080fee55f7badab4c69cd
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/studio/micro-services/cube-studio/job-template/job/model_template/__init__.py
|
ba23e413cb559d4560cb69b610fe807add8e302c
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"EPL-2.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
__init__.py
|
# coding=utf-8
# @Time : 2021/7/5 20:05
# @Auther : lionpeng@tencent.com
|
3dd00deff75891c10155c3885e783de96a468158
|
967e5d3f3cdb9e6c458ae516452ddbb342a200e6
|
/build_cvode.py
|
ffe7a0470ad1dd524e22d58db086e1f384442ffc
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT",
"CC-BY-4.0"
] |
permissive
|
CATIA-Systems/FMPy
|
46685e67c627613955c04558bdd7ed63d9c6ebbb
|
5cec29bd25937885c7d4062766891dea5fcc4bdb
|
refs/heads/main
| 2023-09-04T01:17:29.851581
| 2023-08-21T15:21:09
| 2023-08-21T15:21:09
| 91,576,871
| 339
| 120
|
NOASSERTION
| 2023-08-22T18:27:02
| 2017-05-17T12:59:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,250
|
py
|
build_cvode.py
|
from fmpy import sharedLibraryExtension, platform
from fmpy import platform_tuple as current_platform_tuple
from fmpy.util import download_file
import tarfile
import os
import shutil
from subprocess import check_call
configuration = 'Release'
if os.name == 'nt':
generators = [
('win32', ['-G', 'Visual Studio 17 2022', '-A', 'Win32'], 'i686-windows'),
('win64', ['-G', 'Visual Studio 17 2022', '-A', 'x64'], 'x86_64-windows')
]
sl_prefix = ''
sl_suffix = sharedLibraryExtension
else:
generators = [(platform, ['-G', 'Unix Makefiles'], current_platform_tuple)]
sl_prefix = 'lib'
sl_suffix = sharedLibraryExtension
# clean up
shutil.rmtree('sundials-5.3.0', ignore_errors=True)
filename = download_file(url='https://github.com/LLNL/sundials/releases/download/v5.3.0/sundials-5.3.0.tar.gz',
checksum='88dff7e11a366853d8afd5de05bf197a8129a804d9d4461fb64297f1ef89bca7')
with tarfile.open(filename, "r:gz") as tar:
tar.extractall()
for platform, cmake_options, platform_tuple in generators:
os.makedirs(f'sundials-5.3.0/{platform}/static')
# build CVode as static library
check_call([
'cmake',
'-D', 'BUILD_ARKODE=OFF',
'-D', 'BUILD_CVODES=OFF',
'-D', 'BUILD_IDA=OFF',
'-D', 'BUILD_IDAS=OFF',
'-D', 'BUILD_KINSOL=OFF',
'-D', 'BUILD_SHARED_LIBS=OFF',
'-D', f'CMAKE_INSTALL_PREFIX=sundials-5.3.0/{platform}/static/install',
'-D', 'CMAKE_USER_MAKE_RULES_OVERRIDE=../OverrideMSVCFlags.cmake',
'-D', 'EXAMPLES_ENABLE_C=OFF',
'-D', 'CMAKE_OSX_ARCHITECTURES=arm64;x86_64',
'-S', 'sundials-5.3.0',
'-B', f'sundials-5.3.0/{platform}/static'
] + cmake_options)
check_call(['cmake', '--build', f'sundials-5.3.0/{platform}/static', '--target', 'install', '--config', configuration])
os.makedirs(f'sundials-5.3.0/{platform}/dynamic')
# build CVode as dynamic library
check_call([
'cmake',
'-D', 'BUILD_ARKODE=OFF',
'-D', 'BUILD_CVODES=OFF',
'-D', 'BUILD_IDA=OFF',
'-D', 'BUILD_IDAS=OFF',
'-D', 'BUILD_KINSOL=OFF',
'-D', 'BUILD_STATIC_LIBS=OFF',
'-D', 'EXAMPLES_ENABLE_C=OFF',
'-D', f'CMAKE_INSTALL_PREFIX=sundials-5.3.0/{platform}/dynamic/install',
'-D', 'CMAKE_USER_MAKE_RULES_OVERRIDE=../OverrideMSVCFlags.cmake',
'-D', 'CMAKE_OSX_ARCHITECTURES=arm64;x86_64',
'-S', 'sundials-5.3.0',
'-B', f'sundials-5.3.0/{platform}/dynamic'
] + cmake_options)
check_call(['cmake', '--build', f'sundials-5.3.0/{platform}/dynamic', '--target', 'install', '--config', configuration])
sundials_binary_dir = os.path.join('fmpy', 'sundials', platform_tuple)
os.makedirs(sundials_binary_dir, exist_ok=True)
os.path.join('sundials-5.3.0', platform, 'dynamic', 'install', 'sundials_cvode' + sharedLibraryExtension)
for name in ['sundials_cvode', 'sundials_nvecserial', 'sundials_sunlinsoldense', 'sundials_sunmatrixdense']:
src = os.path.join('sundials-5.3.0', platform, 'dynamic', 'install', 'lib', sl_prefix + name + sl_suffix)
dst = os.path.join(sundials_binary_dir, name + sl_suffix)
shutil.copy(src, dst)
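# Illustrative invocation (not part of the script). It assumes CMake is on PATH,
# the command is run from the FMPy repository root so the fmpy/sundials/ output
# directory resolves correctly, and, on Windows, that the "Visual Studio 17 2022"
# generator used above is installed:
#
#   python build_cvode.py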
|
3b11d54e9aee8b74403a1ade9cd6881ccf0e0825
|
13edccdb425fc5780b9b826386707fd3cb1b1145
|
/tools/validators/ontology_validator/yamlformat/validator/unit_lib.py
|
c53fb233d02c7a607187ee22fad3fe66a645445d
|
[
"Apache-2.0"
] |
permissive
|
google/digitalbuildings
|
5603bb958446d38e37075e9a14f89182a8e805fe
|
0ffe5b61769143826142da4bada3c712b1fd0222
|
refs/heads/master
| 2023-09-03T20:03:37.081139
| 2023-09-01T17:41:58
| 2023-09-01T17:41:58
| 240,568,500
| 319
| 132
|
Apache-2.0
| 2023-09-14T21:00:20
| 2020-02-14T17:57:23
|
Python
|
UTF-8
|
Python
| false
| false
| 16,009
|
py
|
unit_lib.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods for working with units in the ontology."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from typing import NamedTuple
from yamlformat.validator import base_lib
from yamlformat.validator import config_folder_lib
from yamlformat.validator import findings_lib
UNIT_NAME_VALIDATOR = re.compile(r'^[a-z]+(_[a-z]+)*$')
STANDARD_UNIT_TAG: str = 'STANDARD'
CONVERSION_MULTIPLIER_KEY: str = 'multiplier'
CONVERSION_OFFSET_KEY: str = 'offset'
_MeasurementAlias = NamedTuple('MeasurementAlias',
[('alias_name', str), ('base_name', str),
('file_context', findings_lib.FileContext)])
# TODO(b/254872070): Add type annotations
class UnitUniverse(findings_lib.FindingsUniverse):
"""Helper class to represent the defined universe of units."""
def _GetNamespaceMapValue(self, namespace):
"""Helper method for FindingsUniverse._MakeNamespaceMap.
Used to create a map from namespace names to namespaces.
Args:
namespace: UnitNamespace
Returns:
The namespace.
"""
return namespace
def GetUnitsForMeasurement(self, measurement_type, namespace_name=''):
"""Returns a collection of units for a given measurement type.
Returns a collection of units that are defined for the given measurement
type as a dictionary from unit names to Unit objects, or None if there
are no units for that measurement type.
Args:
measurement_type: Name of the measurement subfield.
namespace_name: Name of the namespace.
"""
return self._namespace_map[namespace_name].GetUnitsForMeasurement(
measurement_type)
def GetMeasurementTypes(self, namespace_name=''):
"""Returns list of measurement types having units defined in the namespace.
Args:
namespace_name: Name of the namespace.
"""
return self._namespace_map[namespace_name].GetMeasurementTypes()
class UnitFolder(config_folder_lib.ConfigFolder):
"""Class representing a folder of Units.
Class contains all the context information and methods to validate units.
Attributes:
local_namespace: object representing the contents of the local namespace
parent_namespace: object representing the contents of the global namespace
"""
def __init__(self, folderpath, parent_namespace=None, local_subfields=None):
"""Init.
Args:
folderpath: required string with full path to the folder containing units.
Path should be relative to google3/ and have no leading or trailing /.
      parent_namespace: object containing global namespace information. When
working in the global namespace folder, this should be None.
local_subfields: required map of subfield keys to Subfields for the local
namespace.
"""
super(UnitFolder, self).__init__(folderpath, base_lib.ComponentType.UNIT)
self.local_namespace = UnitNamespace(self._namespace_name, parent_namespace,
local_subfields)
self.parent_namespace = parent_namespace
def AddUnit(self, measurement_type, unit):
"""Validates a unit and adds it to the correct namespace.
Findings will be added to the UnitFolder if validation finds any problems.
Use AddFromConfig for validation of input file. The unit will not be added
if validation fails.
Args:
measurement_type: Name of the measurement subfield.
unit: Unit object to add.
"""
if not unit.IsValid():
self.AddFindings(unit.GetFindings())
return
self.local_namespace.InsertUnit(measurement_type, unit)
def _AddFromConfigHelper(self, document, context):
"""Helper method that reads a single yaml document and adds all units found.
Args:
document: yaml document
context: config file context
"""
for measurement in document:
standard_tag_count = 0
content = document[measurement]
if isinstance(content, str):
self.local_namespace.InsertMeasurementAlias(
_MeasurementAlias(measurement, content, context))
elif isinstance(content, dict):
for unit_name, attr in content.items():
# Unit has the STANDARD tag
if isinstance(attr, str):
if attr == STANDARD_UNIT_TAG:
standard_tag_count += 1
self.AddUnit(measurement, Unit(unit_name, True, 1.0, 0.0,
context))
else:
self.AddFinding(
findings_lib.UnknownUnitTagError(unit_name, attr, context))
# Unit has a conversion map
elif isinstance(attr, dict):
self._ParseConversionInfo(unit_name, attr, measurement, context)
else:
self.AddFinding(
findings_lib.InvalidUnitFormatError(unit_name, context))
if standard_tag_count != 1:
self.AddFinding(
findings_lib.StandardUnitCountError(measurement,
standard_tag_count, context))
else:
self.AddFinding(
findings_lib.InvalidMeasurementFormatError(measurement, context))
self.local_namespace.ResolveMeasurementAliases()
def _ParseConversionInfo(self, unit_name, conversion_map, measurement,
context):
"""Helper method that reads the conversion information for a unit from yaml.
Args:
unit_name: the name of the unit
conversion_map: the dictionary containing conversion information
measurement: the measurement type of the unit
context: config file context
"""
multiplier = None
offset = None
if len(conversion_map) == 2:
for key, value in conversion_map.items():
if key == CONVERSION_MULTIPLIER_KEY:
if isinstance(value, (int, float)):
multiplier = float(value)
else:
self.AddFinding(
findings_lib.InvalidUnitConversionValueError(
unit_name, key, value, context))
elif key == CONVERSION_OFFSET_KEY:
if isinstance(value, (int, float)):
offset = float(value)
else:
self.AddFinding(
findings_lib.InvalidUnitConversionValueError(
unit_name, key, value, context))
else:
self.AddFinding(
findings_lib.InvalidUnitConversionKeyError(
unit_name, key, context))
if multiplier is not None and offset is not None:
self.AddUnit(measurement,
Unit(unit_name, False, multiplier, offset, context))
else:
self.AddFinding(
findings_lib.InvalidUnitConversionMapError(unit_name,
len(conversion_map),
context))
class UnitNamespace(findings_lib.Findings):
"""Class representing a namespace of units.
Attributes:
namespace: string name of this namespace.
parent_namespace: global UnitNamespace, or None if this is the global
namespace.
subfields: map of subfield names to Subfields defined in this namespace.
units: a map from namespace-unique unit identifiers to Unit objects defined
in this namespace.
"""
def __init__(self, namespace, parent_namespace=None, subfields=None):
"""Init.
Args:
namespace: required string representing the name of the namespace.
parent_namespace: global UnitNamespace, or None if this is the global
namespace.
subfields: optional map of subfield names to Subfields. No validation of
subfields will be performed if this is None.
"""
super(UnitNamespace, self).__init__()
self.namespace = namespace
self.parent_namespace = parent_namespace
self.subfields = subfields
self.units = {}
self._units_by_name = {}
self._units_by_measurement = {}
self._measurement_aliases = {}
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for unit in self.units.values():
findings += unit.GetFindings(filter_old_warnings)
return findings
def SubfieldsAreDefined(self):
"""Indicates whether subfields are defined.
Returns:
True if subfields have been populated for this namespace. Subfields may be
populated with an empty map and this will still return true.
"""
return self.subfields is not None
def ValidateMeasurementType(self, measurement_type, unit):
"""Validates that the unit corresponds to a measurement type subfield.
Subfields defined in either the local namespace or global namespace are
valid. If a match is not found, a finding is added to the unit.
Args:
measurement_type: Name of the measurement subfield.
unit: Unit object to validate.
"""
pns = self.parent_namespace
if (not self.SubfieldsAreDefined() or
(pns and not pns.SubfieldsAreDefined())):
# If subfields are undefined on any relevant namespace, proper validation
# is impossible. An empty subfield list counts as being defined.
return
if (measurement_type not in self.subfields and
(pns is None or measurement_type not in pns.subfields)):
unit.AddFinding(
findings_lib.UnknownMeasurementTypeError(unit, measurement_type))
def InsertUnit(self, measurement_type, unit):
"""Inserts a unit into this namespace.
If the unit already exists in the global namespace, adds a
DuplicateUnitDefinitionError to the findings and the duplicate is not
inserted. If the unit is being inserted into a namespace other than the
global namespace, an InvalidUnitNamespaceError will be added to findings.
Args:
measurement_type: Name of the measurement subfield.
unit: unit object to attempt to insert.
"""
if unit.name != 'no_units' and unit.name in self._units_by_name:
self.AddFinding(
findings_lib.DuplicateUnitDefinitionError(
self, unit, self._units_by_name[unit.name].file_context))
return
# Assert namespace is global namespace otherwise add finding.
elif self.parent_namespace is not None:
self.AddFinding(
findings_lib.InvalidUnitNamespaceError(self.namespace,
unit.file_context))
self._InsertEffectiveUnit(measurement_type, unit)
def _InsertEffectiveUnit(self, measurement_type, unit):
"""Inserts a unit into this namespace.
Does not check for uniqueness
within the namespace.
If the unit already exists in the measurement, adds a
DuplicateUnitDefinitionError to the findings and the duplicate is not
inserted.
Args:
measurement_type: Name of the measurement subfield.
unit: unit object to attempt to insert.
"""
self.ValidateMeasurementType(measurement_type, unit)
measurement_units = self._units_by_measurement.setdefault(
measurement_type, {})
if unit.name in measurement_units:
self.AddFinding(
findings_lib.DuplicateUnitDefinitionError(
self, unit, measurement_units[unit.name].file_context))
return
measurement_units[unit.name] = unit
self._units_by_name[unit.name] = unit
# unit_key is an opaque ID that is unique within the namespace. It is only
# used by the backward compatibility checking, which needs all of the units
# to be in a single dict.
unit_key = f'{measurement_type}-{unit.name}'
self.units[unit_key] = unit
def InsertMeasurementAlias(self, alias):
"""Inserts a measurement alias into this namespace.
If the alias already exists in the namespace, adds a
DuplicateMeasurementAliasError to the findings and the duplicate is not
inserted.
Args:
alias: _MeasurementAlias object to insert.
"""
if alias.alias_name in self._measurement_aliases:
prev_instance = self._measurement_aliases[alias.alias_name]
self.AddFinding(
findings_lib.DuplicateMeasurementAliasError(
self, alias, prev_instance.file_context))
return
self._measurement_aliases[alias.alias_name] = alias
# pylint: disable=line-too-long
def ResolveMeasurementAliases(self):
"""Validates all measurement alias references and populates the collections of units for all aliased measurement types.
"""
for alias in self._measurement_aliases.values():
if alias.base_name in self._measurement_aliases:
self.AddFinding(findings_lib.MeasurementAliasIsAliasedError(alias))
elif alias.base_name not in self._units_by_measurement:
self.AddFinding(
findings_lib.UnrecognizedMeasurementAliasBaseError(alias))
else:
for unit in self._units_by_measurement[alias.base_name].values():
self._InsertEffectiveUnit(alias.alias_name, unit)
def GetUnitsForMeasurement(self, measurement_type):
"""Returns the collection of units that are defined for the given measurement type as a dictionary from unit names to Unit objects, or None if there are no units for that measurement type.
Args:
measurement_type: Name of the measurement subfield.
"""
return self._units_by_measurement.get(measurement_type)
def GetMeasurementTypes(self):
"""Returns the list of measurement type names that have units defined in the namespace.
"""
return self._units_by_measurement.keys()
class Unit(findings_lib.Findings):
"""Namespace-unaware class representing an individual unit definition.
Attributes:
name: the full name (without namespace) of this unit
is_standard: whether this is the standard unit for the measurement type
conversion_multiplier: the number to multiply by to convert a value using
this unit to the standard unit
conversion_offset: the number to add to convert a value using this unit to
the standard unit
file_context: the config file context for where this unit was defined
"""
def __init__(self,
name,
is_standard=False,
conversion_multiplier=1.0,
conversion_offset=0.0,
file_context=None):
"""Init.
Args:
name: required string name for the unit
is_standard: whether this is the standard unit for the measurement type
conversion_multiplier: the number to multiply by to convert a value using
this unit to the standard unit
conversion_offset: the number to add to convert a value using this unit to
the standard unit
file_context: optional object with the config file location of this unit.
Returns:
Instance of Unit class.
"""
super(Unit, self).__init__()
self.name = name
self.is_standard = is_standard
self.conversion_multiplier = conversion_multiplier
self.conversion_offset = conversion_offset
self.file_context = file_context
if not isinstance(name, str):
self.AddFinding(findings_lib.IllegalKeyTypeError(name, file_context))
elif not UNIT_NAME_VALIDATOR.match(name):
self.AddFinding(findings_lib.InvalidUnitNameError(name, file_context))
def __eq__(self, other):
if isinstance(other, Unit):
return self.name == other.name and self.is_standard == other.is_standard
return False
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = None
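# Illustrative sketch of the unit-config shape that _AddFromConfigHelper consumes
# once a YAML document has been parsed into a Python mapping (measurement and
# unit names below are examples, not taken from a real config):
#
#   document = {
#       'temperature': {
#           'kelvins': 'STANDARD',
#           'degrees_celsius': {'multiplier': 1.0, 'offset': 273.15},
#       },
#       'temperature_delta': 'temperature',  # measurement alias: reuses base units
#   }
#
# Each measurement maps either to a dict of units (exactly one tagged STANDARD,
# the others carrying a multiplier/offset conversion to it) or to the name of a
# base measurement whose units it aliases.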
|
dd648f8c181feb4600de70546072bee1cd0021f4
|
8ed15d43652dbcab332c78923da416b91b139323
|
/python/app/fednlp/data/raw_data_loader/obsolete/SQuAD_1_1/data_loader.py
|
890a9aa4924198d63446b6e8671e7e7553eb57ef
|
[
"Apache-2.0"
] |
permissive
|
FedML-AI/FedML
|
74d144038c9de4a0621eb328d00987abac35e2d1
|
b436fbd95cbb62f6c58d2233d7affa0f62cb1817
|
refs/heads/master
| 2023-08-31T22:15:39.786371
| 2023-08-24T03:41:58
| 2023-08-24T03:41:58
| 281,519,510
| 3,197
| 807
|
Apache-2.0
| 2023-09-14T02:14:20
| 2020-07-21T22:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 8,931
|
py
|
data_loader.py
|
# import json
# import os
# import random
# import re
# import nltk
#
# from data_preprocessing.base.base_raw_data_loader import BaseRawDataLoader
# class RawDataLoader(BaseRawDataLoader):
# def __init__(self, data_path):
# super().__init__(data_path)
# self.task_type = "span_extraction"
# self.document_X = []
# self.question_X = []
# self.attributes = dict()
# self.train_file_name = "train-v1.1.json"
# self.test_file_name = "dev-v1.1.json"
# def data_loader(self):
# if len(self.document_X) == 0 or len(self.question_X) == 0 or len(self.Y) == 0:
# context_X, question_X, Y, question_ids = self.process_data(os.path.join(self.data_path, self.train_file_name))
# train_size = len(context_X)
# temp = self.process_data(os.path.join(self.data_path, self.test_file_name))
# context_X.extend(temp[0])
# question_X.extend(temp[1])
# Y.extend(temp[2])
# question_ids.extend(temp[3])
# train_index_list = [i for i in range(train_size)]
# test_index_list = [i for i in range(train_size, len(context_X))]
# index_list = train_index_list + test_index_list
# self.context_X = {i: d for i, d in enumerate(context_X)}
# self.question_X = {i: d for i, d in enumerate(question_X)}
# self.question_ids = {i: d for i, d in enumerate(question_ids)}
# self.Y = {i: d for i, d in enumerate(Y)}
# self.attributes["train_index_list"] = train_index_list
# self.attributes["test_index_list"] = test_index_list
# self.attributes["index_list"] = index_list
# return {"context_X": self.context_X, "question_X": self.question_X, "Y": self.Y, "question_ids": self.question_ids,
# "attributes": self.attributes, "task_type": self.task_type}
# def process_data(self, file_path):
# context_X = []
# question_X = []
# Y = []
# question_ids = []
# if "doc_index" not in self.attributes:
# self.attributes["doc_index"] = []
# with open(file_path, "r", encoding='utf-8') as f:
# data = json.load(f)
# for doc_idx, document in enumerate(data["data"]):
# for paragraph in document["paragraphs"]:
# for qas in paragraph["qas"]:
# for answer in qas["answers"]:
# context_X.append(paragraph["context"])
# question_X.append(qas["question"])
# start = answer["answer_start"]
# end = start + len(answer["text"].rstrip())
# Y.append((start, end))
# question_ids.append(qas["id"])
# self.attributes["doc_index"].append(doc_idx)
# return context_X, question_X, Y, question_ids
# # TODO: Unified Partition Interface
# @staticmethod
# def nature_partition(attributes):
# train_doc_index_set = set([attributes["doc_index"][i] for i in attributes["train_index_list"]])
# partition_dict = dict()
# partition_dict["partition_data"] = dict()
# partition_dict["n_clients"] = len(train_doc_index_set)
# for doc_id in train_doc_index_set:
# for i in attributes["train_index_list"]:
# if attributes["doc_index"][i] == doc_id:
# if doc_id not in partition_dict["partition_data"]:
# partition_dict["partition_data"][doc_id] = dict()
# partition_dict["partition_data"][doc_id]["train"] = list()
# partition_dict["partition_data"][doc_id]["test"] = list()
# partition_dict["partition_data"][doc_id]["train"].append(i)
# test_doc_index_set = set([attributes["doc_index"][i] for i in attributes["test_index_list"]])
# for doc_id in test_doc_index_set:
# test_doc_index_list = []
# for i in attributes["test_index_list"]:
# if attributes["doc_index"][i] == doc_id:
# test_doc_index_list.append(i)
# client_idx = random.randint(0, partition_dict["n_clients"] - 1)
# partition_dict["partition_data"][client_idx]["test"].extend(test_doc_index_list)
# return partition_dict
# class ClientDataLoader(BaseClientDataLoader):
# def __init__(self, data_path, partition_path, client_idx=None, partition_method="uniform", tokenize=False, data_filter=None):
# data_fields = ["context_X", "question_X", "Y", "question_ids"]
# super().__init__(data_path, partition_path, client_idx, partition_method, tokenize, data_fields)
# self.clean_data()
# if self.tokenize:
# self.tokenize_data()
# self.transform_labels()
# if data_filter:
# data_filter(self.train_data)
# data_filter(self.test_data)
# def clean_data(self):
# def __clean_data(data):
# for i in range(len(data["context_X"])):
# data["context_X"][i] = data["context_X"][i].replace("''", '" ').replace("``", '" ')
# __clean_data(self.train_data)
# __clean_data(self.test_data)
# def tokenize_data(self):
# def word_tokenize(sent):
# return [token.replace("''", '"').replace("``", '"') for token in nltk.word_tokenize(sent)]
# def __tokenize_data(data):
# data["tokenized_context_X"] = list()
# data["tokenized_question_X"] = list()
# data["char_context_X"] = list()
# data["char_question_X"] = list()
# self.data_fields.extend(["tokenized_context_X", "tokenized_question_X", "char_context_X", "char_question_X"])
# for i in range(len(data["context_X"])):
# temp_tokens = word_tokenize(data["context_X"][i])
# data["tokenized_context_X"].append(self.remove_stop_tokens(temp_tokens))
# data["tokenized_question_X"].append(word_tokenize(data["question_X"][i]))
# context_chars = [list(token) for token in data["tokenized_context_X"][i]]
# question_chars = [list(token) for token in data["tokenized_question_X"][i]]
# data["char_context_X"].append(context_chars)
# data["char_question_X"].append(question_chars)
# __tokenize_data(self.train_data)
# __tokenize_data(self.test_data)
# def remove_stop_tokens(self, temp_tokens):
# tokens = []
# for token in temp_tokens:
# flag = False
# l = ("-", "\u2212", "\u2014", "\u2013", "/", "~", '"', "'", "\u201C", "\u2019", "\u201D", "\u2018", "\u00B0")
# tokens.extend(re.split("([{}])".format("".join(l)), token))
# return tokens
# def transform_labels(self):
# def __transform_labels(data):
# for i in range(len(data["context_X"])):
# context = data["context_X"][i]
# context_tokens = data["tokenized_context_X"][i]
# start, stop = data["Y"][i]
# spans = self.get_spans(context, context_tokens)
# idxs = []
# for word_idx, span in enumerate(spans):
# if not (stop <= span[0] or start >= span[1]):
# idxs.append(word_idx)
# data["Y"][i] = (idxs[0], idxs[-1] + 1)
# __transform_labels(self.train_data)
# __transform_labels(self.test_data)
# def get_spans(self, text, all_tokens):
# spans = []
# cur_idx = 0
# for token in all_tokens:
# if text.find(token, cur_idx) < 0:
# print("{} {} {}".format(token, cur_idx, text))
# raise Exception()
# cur_idx = text.find(token, cur_idx)
# spans.append((cur_idx, cur_idx + len(token)))
# cur_idx += len(token)
# return spans
# def get_normal_format(dataset, cut_off=None):
# """
# reformat the dataset to normal version.
# """
# reformatted_data = []
# assert len(dataset["context_X"]) == len(dataset["question_X"]) == len(dataset["Y"]) == len(dataset["question_ids"])
# for c, q, a, qid in zip(dataset["context_X"], dataset["question_X"], dataset["Y"], dataset["question_ids"]):
# item = {}
# item["context"] = c
# item["qas"] = [
# {
# # "id": "%d"%(len(reformatted_data)+1),
# "id": qid,
# "is_impossible": False,
# "question": q,
# "answers": [{"text": c[a[0]:a[1]], "answer_start": a[0]}],
# }
# ]
# reformatted_data.append(item)
# return reformatted_data[:cut_off]
|
ab626189093588a027053c39d0c4ba0a401de3ba
|
866233a68914df7b8f0d89efb44b27ec29368d30
|
/src/aioprometheus/service.py
|
f715381921e14a8b89ddeb375dc2611d90d1ca9e
|
[
"MIT"
] |
permissive
|
claws/aioprometheus
|
22d442abcdb6a83a31025cd36bfc967150b9570b
|
4786678b413d166c0b6e0041558d11bc1a7097b2
|
refs/heads/master
| 2023-03-24T19:41:07.253289
| 2023-03-14T05:55:27
| 2023-03-14T05:55:27
| 61,433,356
| 142
| 22
| null | 2023-03-14T05:55:29
| 2016-06-18T12:53:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,112
|
py
|
service.py
|
"""
This module implements an asynchronous Prometheus metrics export service.
"""
import logging
try:
import aiohttp
except ImportError as exc:
raise ImportError(
"`aiohttp` could not be imported. Did you install `aioprometheus` "
"with the `aiohttp` extra?"
) from exc
# imports only used for type annotations
from ssl import SSLContext
from typing import Optional
import aiohttp.web
from aiohttp.hdrs import ACCEPT
from aiohttp.hdrs import METH_GET as GET
from aioprometheus import REGISTRY, Registry, render
logger = logging.getLogger(__name__)
DEFAULT_METRICS_PATH = "/metrics"
class Service:
"""
This class implements a Prometheus metrics service that can
be embedded within asyncio based applications so they can be scraped
by the Prometheus.io server.
"""
def __init__(self, registry: Registry = REGISTRY) -> None:
"""
Initialise the Prometheus metrics service.
:param registry: The :class:`Registry` instance that holds all the
metrics that this service should expose. If no registry is specified
then the default registry will be used.
:raises: Exception if the registry object passed is not an instance of
the Registry type.
"""
if not isinstance(registry, Registry):
raise Exception(f"registry must be a Registry, got: {registry}")
self.registry = registry
self._site: Optional[aiohttp.web.TCPSite] = None
self._app: Optional[aiohttp.web.Application] = None
self._runner: Optional[aiohttp.web.AppRunner] = None
self._https = False
self._root_url = "/"
self._metrics_url: Optional[str] = None
@property
def base_url(self) -> str:
"""Return the base service url
:raises: Exception if the server has not been started.
:return: the base service URL as a string
"""
if self._site is None:
raise Exception(
"No URL available, Prometheus metrics server is not running"
)
# Keep mypy happy by checking runner is not None.
if self._runner is None:
raise Exception(
"No URL available, Prometheus metrics server is not running"
)
# IPv4 address returns a 2-tuple, IPv6 returns a 4-tuple
host, port, *_ = self._runner.addresses[0]
scheme = f"http{'s' if self._https else ''}"
host = host if ":" not in host else f"[{host}]"
url = f"{scheme}://{host}:{port}"
return url
@property
def root_url(self) -> str:
"""Return the root service url
:raises: Exception if the server has not been started.
:return: the root URL as a string
"""
return f"{self.base_url}{self._root_url}"
@property
def metrics_url(self) -> str:
"""Return the Prometheus metrics url
:raises: Exception if the server has not been started.
:return: the metrics URL as a string
"""
return f"{self.base_url}{self._metrics_url}"
async def start(
self,
addr: str = "",
port: int = 0,
ssl: Optional[SSLContext] = None,
metrics_url: str = DEFAULT_METRICS_PATH,
) -> None:
"""Start the prometheus metrics HTTP(S) server.
:param addr: the address to bind the server on. By default this is
set to an empty string so that the service becomes available on
all interfaces.
:param port: The port to bind the server on. The default value is 0
which will cause the server to bind to an ephemeral port. If you
want the server to operate on a fixed port then you need to specify
the port.
        :param ssl: an SSLContext for use with TLS.
:param metrics_url: The name of the endpoint route to expose
prometheus metrics on. Defaults to '/metrics'.
:raises: Exception if the server could not be started.
"""
logger.debug(
f"Prometheus metrics server starting on {addr}:{port}{metrics_url}"
)
if self._site:
logger.warning("Prometheus metrics server is already running")
return
self._app = aiohttp.web.Application()
self._metrics_url = metrics_url
self._app["metrics_url"] = metrics_url
self._app.router.add_route(GET, metrics_url, self.handle_metrics)
self._app.router.add_route(GET, self._root_url, self.handle_root)
self._app.router.add_route(GET, "/robots.txt", self.handle_robots)
self._runner = aiohttp.web.AppRunner(self._app)
await self._runner.setup()
self._https = ssl is not None
try:
self._site = aiohttp.web.TCPSite(
self._runner, addr, port, ssl_context=ssl, shutdown_timeout=2.0
)
await self._site.start()
except Exception:
logger.exception("error creating metrics server")
raise
logger.debug(f"Prometheus metrics server started on {self.metrics_url}")
async def stop(self) -> None:
"""Stop the prometheus metrics HTTP(S) server"""
logger.debug("Prometheus metrics server stopping")
if self._site is None:
logger.warning("Prometheus metrics server is already stopped")
return
# Keep mypy happy by checking runner is not None.
if self._runner is None:
raise Exception("Prometheus metrics server is not running")
await self._runner.cleanup()
self._site = None
self._app = None
self._runner = None
logger.debug("Prometheus metrics server stopped")
async def handle_metrics(
self, request: "aiohttp.web.Request"
) -> "aiohttp.web.Response":
"""Handle a request to the metrics route.
The request is inspected and the most efficient response data format
is chosen.
"""
content, http_headers = render(
self.registry, request.headers.getall(ACCEPT, [])
)
return aiohttp.web.Response(body=content, headers=http_headers)
async def handle_root(
self, request: "aiohttp.web.Request"
) -> "aiohttp.web.Response":
"""Handle a request to the / route.
Serves a trivial page with a link to the metrics. Use this if ever
        you need to point a health check at the service.
"""
metrics_url = request.app["metrics_url"]
return aiohttp.web.Response(
content_type="text/html",
text=f"<html><body><a href='{metrics_url}'>metrics</a></body></html>",
)
async def handle_robots(
self,
request: "aiohttp.web.Request", # pylint: disable=unused-argument
) -> "aiohttp.web.Response":
"""Handle a request to /robots.txt
If a robot ever stumbles on this server, discourage it from indexing.
"""
return aiohttp.web.Response(
content_type="text/plain", text="User-agent: *\nDisallow: /\n"
)
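# Usage sketch (illustrative only): `svc` below stands for an instance of the
# metrics service class defined above; its constructor is defined earlier in
# this module and is not shown here. From an asyncio coroutine the typical
# lifecycle is:
#
#     await svc.start(addr="127.0.0.1", port=8000)
#     print(svc.metrics_url)   # e.g. http://127.0.0.1:8000/metrics
#     ...
#     await svc.stop()
#
# With port=0 (the default) the server binds an ephemeral port, and
# base_url/root_url/metrics_url raise until the server has been started.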
|
8f7691d0cce88d0a9e6b5de0787fd807559f1272
|
f6492af1b4c162c7d76edc7de5fa671dff963b11
|
/tests/transformer/operators/test_comparison_operators.py
|
7aa3e9a5096c5ddc3c9ace004c03500d96bcecca
|
[
"ZPL-2.1"
] |
permissive
|
zopefoundation/RestrictedPython
|
4cf9b85819648672078c746f059b714feb252ed0
|
aac3f96d07212cb6e07f3bcafd6da63f531ca9e0
|
refs/heads/master
| 2023-09-03T20:13:55.796804
| 2023-08-31T06:08:49
| 2023-08-31T06:08:49
| 8,480,726
| 341
| 43
|
NOASSERTION
| 2023-09-01T06:01:53
| 2013-02-28T14:29:45
|
Python
|
UTF-8
|
Python
| false
| false
| 408
|
py
|
test_comparison_operators.py
|
from tests.helper import restricted_eval
def test_Eq():
assert restricted_eval('1 == 1') is True
def test_NotEq():
assert restricted_eval('1 != 2') is True
def test_Gt():
assert restricted_eval('2 > 1') is True
def test_Lt():
assert restricted_eval('1 < 2') is True
def test_GtE():
assert restricted_eval('2 >= 2') is True
def test_LtE():
assert restricted_eval('1 <= 2') is True
|
08f32e0d7d6f8048fadb091f7a452794afddd8ff
|
110044654f706e920380dad2779bb32a77f1f26f
|
/test/Java/RMIC.py
|
b7e45f4e9ee844dc597ca7b99b240d7339fa956b
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SCons/scons
|
89327bb9635cee6e7cc59249edca9cd859d7d1ff
|
b2a7d7066a2b854460a334a5fe737ea389655e6e
|
refs/heads/master
| 2023-09-01T19:37:03.603772
| 2023-08-28T04:32:42
| 2023-08-28T04:32:42
| 104,670,160
| 1,827
| 342
|
MIT
| 2023-09-14T15:13:21
| 2017-09-24T19:23:46
|
Python
|
UTF-8
|
Python
| false
| false
| 9,903
|
py
|
RMIC.py
|
#!/usr/bin/env python
#
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.file_fixture(['Java-fixture', 'myrmic.py'])
test.write('SConstruct', """
DefaultEnvironment(tools=[])
env = Environment(tools=['rmic'], RMIC=r'%(_python_)s myrmic.py')
env.RMIC(target='outdir', source='test1.java')
""" % locals())
test.write('test1.java', """\
test1.java
/*rmic*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match(['outdir', 'test1.class'], "test1.java\nline 3\n", mode='r')
if os.path.normcase('.java') == os.path.normcase('.JAVA'):
test.write('SConstruct', """\
DefaultEnvironment(tools=[])
env = Environment(tools=['rmic'], RMIC=r'%(_python_)s myrmic.py')
env.RMIC(target='outdir', source='test2.JAVA')
""" % locals())
test.write('test2.JAVA', """\
test2.JAVA
/*rmic*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match(['outdir', 'test2.class'], "test2.JAVA\nline 3\n", mode='r')
where_javac, java_version = test.java_where_javac()
where_rmic = test.java_where_rmic()
# Try to get the major/minor Java version
curver = (1, 0)
if java_version.count('.') == 1:
# Check Java version
major, minor = java_version.split('.')
try:
curver = (int(major), int(minor))
except ValueError:
pass
elif java_version.count('.') == 0:
# java 11?
try:
curver = (int(java_version), 0)
except ValueError:
pass
# Check the version of the found Java compiler.
# If it's 1.8 or higher, we skip the further RMIC test
# because we'll get warnings about the deprecated API...
# it's just not state-of-the-art anymore.
# Recent java versions (9 and greater) are back to being
# marketed as a simple version, but java_where_javac() will
# still return a dotted version, like 10.0. If this changes,
# will need to rework this rule.
# Note how we allow simple version strings like "5" and
# "6" to successfully pass this test.
if curver < (1, 8):
test.file_fixture('wrapper_with_args.py')
test.write('SConstruct', """
DefaultEnvironment(tools=[])
foo = Environment(tools=['javac', 'rmic'])
foo.Java(target='class1', source='com/sub/foo')
foo.RMIC(
target='outdir1',
source=['class1/com/sub/foo/Example1.class', 'class1/com/sub/foo/Example2'],
JAVACLASSDIR='class1',
)
rmic = foo.Dictionary('RMIC')
bar = foo.Clone(RMIC=r'%(_python_)s wrapper_with_args.py ' + rmic)
bar_classes = bar.Java(target='class2', source='com/sub/bar')
# XXX This is kind of a Python brute-force way to do what Ant
# does with its "excludes" attribute. We should probably find
# a similar friendlier way to do this.
bar_classes = [c for c in bar_classes if 'Hello' not in str(c)]
bar.RMIC(target=Dir('outdir2'), source=bar_classes)
""" % locals() )
test.subdir(
'com',
['com', 'other'],
['com', 'sub'],
['com', 'sub', 'foo'],
['com', 'sub', 'bar'],
'src3a',
'src3b',
)
test.write(['com', 'sub', 'foo', 'Hello.java'], """\
package com.sub.foo;
import java.rmi.Remote;
import java.rmi.RemoteException;
public interface Hello extends Remote {
String sayHello() throws RemoteException;
}
""")
test.write(['com', 'sub', 'foo', 'Example1.java'], """\
package com.sub.foo;
import java.rmi.Naming;
import java.rmi.RemoteException;
import java.lang.SecurityManager;
import java.rmi.server.UnicastRemoteObject;
public class Example1 extends UnicastRemoteObject implements Hello {
static final long serialVersionUID = 0;
public Example1() throws RemoteException {
super();
}
public String sayHello() {
return "Hello World!";
}
public static void main(String args[]) {
if (System.getSecurityManager() == null) {
System.setSecurityManager(new SecurityManager());
}
try {
Example1 obj = new Example1();
Naming.rebind("//myhost/HelloServer", obj);
System.out.println("HelloServer bound in registry");
} catch (Exception e) {
System.out.println("Example1 err: " + e.getMessage());
e.printStackTrace();
}
}
}
""")
test.write(['com', 'sub', 'foo', 'Example2.java'], """\
package com.sub.foo;
import java.rmi.Naming;
import java.rmi.RemoteException;
import java.lang.SecurityManager;
import java.rmi.server.UnicastRemoteObject;
public class Example2 extends UnicastRemoteObject implements Hello {
static final long serialVersionUID = 0;
public Example2() throws RemoteException {
super();
}
public String sayHello() {
return "Hello World!";
}
public static void main(String args[]) {
if (System.getSecurityManager() == null) {
System.setSecurityManager(new SecurityManager());
}
try {
Example2 obj = new Example2();
Naming.rebind("//myhost/HelloServer", obj);
System.out.println("HelloServer bound in registry");
} catch (Exception e) {
System.out.println("Example2 err: " + e.getMessage());
e.printStackTrace();
}
}
}
""")
test.write(['com', 'sub', 'bar', 'Hello.java'], """\
package com.sub.bar;
import java.rmi.Remote;
import java.rmi.RemoteException;
public interface Hello extends Remote {
String sayHello() throws RemoteException;
}
""")
test.write(['com', 'sub', 'bar', 'Example3.java'], """\
package com.sub.bar;
import java.rmi.Naming;
import java.rmi.RemoteException;
import java.lang.SecurityManager;
import java.rmi.server.UnicastRemoteObject;
public class Example3 extends UnicastRemoteObject implements Hello {
static final long serialVersionUID = 0;
public Example3() throws RemoteException {
super();
}
public String sayHello() {
return "Hello World!";
}
public static void main(String args[]) {
if (System.getSecurityManager() == null) {
System.setSecurityManager(new SecurityManager());
}
try {
Example3 obj = new Example3();
Naming.rebind("//myhost/HelloServer", obj);
System.out.println("HelloServer bound in registry");
} catch (Exception e) {
System.out.println("Example3 err: " + e.getMessage());
e.printStackTrace();
}
}
}
""")
test.write(['com', 'sub', 'bar', 'Example4.java'], """\
package com.sub.bar;
import java.rmi.Naming;
import java.rmi.RemoteException;
import java.lang.SecurityManager;
import java.rmi.server.UnicastRemoteObject;
public class Example4 extends UnicastRemoteObject implements Hello {
static final long serialVersionUID = 0;
public Example4() throws RemoteException {
super();
}
public String sayHello() {
return "Hello World!";
}
public static void main(String args[]) {
if (System.getSecurityManager() == null) {
System.setSecurityManager(new SecurityManager());
}
try {
Example4 obj = new Example4();
Naming.rebind("//myhost/HelloServer", obj);
System.out.println("HelloServer bound in registry");
} catch (Exception e) {
System.out.println("Example4 err: " + e.getMessage());
e.printStackTrace();
}
}
}
""")
test.run(arguments = '.')
test.must_match('wrapper.out',
"wrapper_with_args.py rmic -d outdir2 -classpath class2 com.sub.bar.Example3 com.sub.bar.Example4\n",
mode='r')
test.must_exist(test.workpath('outdir1', 'com', 'sub', 'foo', 'Example1_Stub.class'))
test.must_exist(test.workpath('outdir1', 'com', 'sub', 'foo', 'Example2_Stub.class'))
test.must_exist(test.workpath('outdir2', 'com', 'sub', 'bar', 'Example3_Stub.class'))
test.must_exist(test.workpath('outdir2', 'com', 'sub', 'bar', 'Example4_Stub.class'))
# We used to check for _Skel.class files as well, but they're not
# generated by default starting with Java 1.5, and they apparently
# haven't been needed for a while. Don't bother looking, even if we're
# running Java 1.4. If we think they're needed but they don't exist
# the test.up_to_date() call below will detect it.
#test.must_exist(test.workpath('outdir1', 'com', 'sub', 'foo', 'Example1_Skel.class'))
#test.must_exist(test.workpath('outdir1', 'com', 'sub', 'foo', 'Example2_Skel.class'))
#test.must_exist(test.workpath('outdir2', 'com', 'sub', 'bar', 'Example3_Skel.class'))
#test.must_exist(test.workpath('outdir2', 'com', 'sub', 'bar', 'Example4_Skel.class'))
test.up_to_date(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bd2557e59db18d3d02f12a89f72d9c5ac8016f9b
|
4091caecbc727e6d6ae0d827afce11c5979a84fd
|
/tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/dataset_folder.py
|
de2b5fbdaf1595f549797f9c3b1f21c816d6cca0
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/open_model_zoo
|
fdb03dd40bfccb854e4ed4f7b9beaa90596963cd
|
7929adbe91e9cfe8dc5dc1daad5ae7392f9719a0
|
refs/heads/master
| 2023-08-18T18:03:47.254427
| 2023-08-18T10:54:31
| 2023-08-18T10:54:31
| 153,097,694
| 1,712
| 730
|
Apache-2.0
| 2023-09-11T11:31:20
| 2018-10-15T10:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
dataset_folder.py
|
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .format_converter import DirectoryBasedAnnotationConverter, ConverterReturn
from ..representation import ClassificationAnnotation
class DatasetFolderConverter(DirectoryBasedAnnotationConverter):
__provider__ = 'cls_dataset_folder'
def convert(self, check_content=False, **kwargs):
meta = self.get_meta()
annotations = []
for idx, cls_dir in meta['label_map'].items():
for img in (self.data_dir / cls_dir).glob('*'):
identifier = '{}/{}'.format(cls_dir, img.name)
annotations.append(ClassificationAnnotation(identifier, idx))
return ConverterReturn(annotations, meta, None)
def get_meta(self):
classes = [directory.name for directory in self.data_dir.glob('*') if directory.is_dir()]
classes.sort()
return {'label_map': dict(enumerate(classes))}
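# Illustrative example of the directory layout this converter handles
# (folder and file names below are hypothetical):
#
#   data_dir/
#       cat/  img001.jpg  img002.jpg ...
#       dog/  img101.jpg ...
#
# get_meta() would return {'label_map': {0: 'cat', 1: 'dog'}} (class folders
# sorted alphabetically), and convert() would produce ClassificationAnnotation
# objects with identifiers such as 'cat/img001.jpg' and the matching label index.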
|
f1d98e2a5a123e990f1544436e1ffd09cb128053
|
25e99a0af5751865bce1702ee85cc5c080b0715c
|
/design_pattern/src/大話設計模式/design-patterns-py/factory_method.py
|
5f7a9a3abf6792c67db02aec1818fc7bdfc57a9a
|
[] |
no_license
|
jasonblog/note
|
215837f6a08d07abe3e3d2be2e1f183e14aa4a30
|
4471f95736c60969a718d854cab929f06726280a
|
refs/heads/master
| 2023-05-31T13:02:27.451743
| 2022-04-04T11:28:06
| 2022-04-04T11:28:06
| 35,311,001
| 130
| 67
| null | 2023-02-10T21:26:36
| 2015-05-09T02:04:40
|
C
|
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
factory_method.py
|
import abc
class Leifeng(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def wash(self):
        """wash"""

    @abc.abstractmethod
    def sweep(self):
        """sweep"""

    @abc.abstractmethod
    def buy_rice(self):
        """buy rice"""


class Undergraduate(Leifeng):
    def wash(self):
        print("undergraduate wash")

    def sweep(self):
        print("undergraduate sweep")

    def buy_rice(self):
        print("undergraduate buy rice")


class Volunteer(Leifeng):
    def wash(self):
        print("volunteer wash")

    def sweep(self):
        print("volunteer sweep")

    def buy_rice(self):
        print("volunteer buy rice")


class IFactory(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def CreateLeifeng(self):
        """create class leifeng"""


class UndergraduateFactory(IFactory):
    def CreateLeifeng(self):
        return Undergraduate()


class VolunteerFactory(IFactory):
    def CreateLeifeng(self):
        return Volunteer()


if __name__ == "__main__":
    # create undergraduate to sweep
    i_factory = UndergraduateFactory()
    leifeng = i_factory.CreateLeifeng()
    leifeng.sweep()

    # create volunteer to wash
    i_factory = VolunteerFactory()  # just replace UndergraduateFactory with VolunteerFactory
    leifeng = i_factory.CreateLeifeng()
    leifeng.wash()
|
c9c24a5981782ffb6fd35974af70cb52976b8710
|
27b86f422246a78704e0e84983b2630533a47db6
|
/src/ezdxf/npshapes.py
|
357f80696ba11cd974efcdceb75a12eb11dca9c7
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 16,096
|
py
|
npshapes.py
|
# Copyright (c) 2023, Manfred Moitzi
# License: MIT License
from __future__ import annotations
from typing import Iterable, Optional, Iterator, Sequence
from typing_extensions import Self, TypeAlias
import abc
import numpy as np
from ezdxf.math import (
Matrix44,
Vec2,
Vec3,
has_clockwise_orientation,
Bezier3P,
Bezier4P,
BoundingBox2d,
)
from ezdxf.path import (
Path,
Command,
PathElement,
LineTo,
MoveTo,
Curve3To,
Curve4To,
)
try:
from ezdxf.acc import np_support # type: ignore # mypy ???
except ImportError:
np_support = None
__all__ = [
"NumpyPath2d",
"NumpyPoints2d",
"NumpyShapesException",
"EmptyShapeError",
"to_qpainter_path",
"to_matplotlib_path",
"single_paths",
]
# comparing Command.<attrib> to ints is very slow
CMD_MOVE_TO = int(Command.MOVE_TO)
CMD_LINE_TO = int(Command.LINE_TO)
CMD_CURVE3_TO = int(Command.CURVE3_TO)
CMD_CURVE4_TO = int(Command.CURVE4_TO)
class NumpyShapesException(Exception):
pass
class EmptyShapeError(NumpyShapesException):
pass
CommandNumpyType: TypeAlias = np.int8
VertexNumpyType: TypeAlias = np.float64
class NumpyShape2d(abc.ABC):
"""This is an optimization to store many 2D paths and polylines in a compact way
without sacrificing basic functions like transformation and bounding box calculation.
"""
_vertices: np.ndarray
def extents(self) -> tuple[Vec2, Vec2]:
"""Returns the extents of the bounding box as tuple (extmin, extmax)."""
v = self._vertices
if len(v) > 0:
return Vec2(v.min(0)), Vec2(v.max(0))
else:
raise EmptyShapeError("empty shape has no extents")
def np_vertices(self) -> np.ndarray:
return self._vertices
def transform_inplace(self, m: Matrix44) -> None:
"""Transforms the vertices of the shape inplace."""
v = self._vertices
if len(v) == 0:
return
m.transform_array_inplace(v, 2)
def vertices(self) -> list[Vec2]:
"""Returns the shape vertices as list of :class:`Vec2`."""
return [Vec2(v) for v in self._vertices]
def bbox(self) -> BoundingBox2d:
"""Returns the bounding box of all vertices."""
return BoundingBox2d(self.extents())
class NumpyPoints2d(NumpyShape2d):
"""Represents an array of 2D points stored as a ndarray."""
def __init__(self, points: Iterable[Vec2 | Vec3]) -> None:
self._vertices = np.array([(v.x, v.y) for v in points], dtype=VertexNumpyType)
def __len__(self) -> int:
return len(self._vertices)
NO_VERTICES = np.array([], dtype=VertexNumpyType)
NO_COMMANDS = np.array([], dtype=CommandNumpyType)
class NumpyPath2d(NumpyShape2d):
"""Represents a 2D path, the path control vertices and commands are stored as ndarray.
This class cannot build paths from scratch and is therefore not a drop-in replacement
for the :class:`ezdxf.path.Path` class. Operations like transform and reverse are
done inplace to utilize the `numpy` capabilities. This behavior is different from the
:class:`ezdxf.path.Path` class!!!
Construct new paths by the :class:`Path` class and convert them to
:class:`NumpyPath2d` instances::
path = Path((0, 0))
path.line_to((50, 70))
...
path2d = NumpyPath2d(path)
"""
def __init__(self, path: Optional[Path]) -> None:
if path is None:
self._vertices = NO_VERTICES
self._commands = NO_COMMANDS
return
# (v.x, v.y) is 4x faster than Vec2(v), see profiling/numpy_array_setup.py
vertices = [(v.x, v.y) for v in path.control_vertices()]
if len(vertices) == 0:
try: # control_vertices() does not return the start point of empty paths
vertices = [path.start]
except IndexError:
vertices = []
self._vertices = np.array(vertices, dtype=VertexNumpyType)
self._commands = np.array(path.command_codes(), dtype=CommandNumpyType)
def __len__(self) -> int:
return len(self._commands)
@property
def start(self) -> Vec2:
"""Returns the start point as :class:`~ezdxf.math.Vec2` instance."""
return Vec2(self._vertices[0])
@property
def end(self) -> Vec2:
"""Returns the end point as :class:`~ezdxf.math.Vec2` instance."""
return Vec2(self._vertices[-1])
def control_vertices(self) -> list[Vec2]:
return [Vec2(v) for v in self._vertices]
def __copy__(self) -> Self:
clone = self.__class__(None)
clone._commands = self._commands.copy()
clone._vertices = self._vertices.copy()
return clone
def command_codes(self) -> list[int]:
"""Internal API."""
return list(self._commands)
def commands(self) -> Iterator[PathElement]:
vertices = self.vertices()
index = 1
for cmd in self._commands:
if cmd == CMD_LINE_TO:
yield LineTo(vertices[index])
index += 1
elif cmd == CMD_CURVE3_TO:
yield Curve3To(vertices[index + 1], vertices[index])
index += 2
elif cmd == CMD_CURVE4_TO:
yield Curve4To(
vertices[index + 2], vertices[index], vertices[index + 1]
)
index += 3
elif cmd == CMD_MOVE_TO:
yield MoveTo(vertices[index])
index += 1
clone = __copy__
def to_path(self) -> Path:
"""Returns a new :class:`ezdxf.path.Path` instance."""
vertices = [Vec3(v) for v in self._vertices]
commands = [Command(c) for c in self._commands]
return Path.from_vertices_and_commands(vertices, commands)
@classmethod
def from_vertices(
cls, vertices: Iterable[Vec2 | Vec3], close: bool = False
) -> Self:
new_path = cls(None)
vertices = list(vertices)
if len(vertices) == 0:
return new_path
if close and not vertices[0].isclose(vertices[-1]):
vertices.append(vertices[0])
# (v.x, v.y) is 4x faster than Vec2(v), see profiling/numpy_array_setup.py
points = [(v.x, v.y) for v in vertices]
new_path._vertices = np.array(points, dtype=VertexNumpyType)
new_path._commands = np.full(
len(points) - 1, fill_value=CMD_LINE_TO, dtype=CommandNumpyType
)
return new_path
@property
def has_sub_paths(self) -> bool:
"""Returns ``True`` if the path is a :term:`Multi-Path` object that
contains multiple sub-paths.
"""
return CMD_MOVE_TO in self._commands
@property
def is_closed(self) -> bool:
"""Returns ``True`` if the start point is close to the end point."""
if len(self._vertices) > 1:
return self.start.isclose(self.end)
return False
@property
def has_lines(self) -> bool:
"""Returns ``True`` if the path has any line segments."""
return CMD_LINE_TO in self._commands
@property
def has_curves(self) -> bool:
"""Returns ``True`` if the path has any curve segments."""
return CMD_CURVE3_TO in self._commands or CMD_CURVE4_TO in self._commands
def sub_paths(self) -> list[Self]:
"""Yield all sub-paths as :term:`Single-Path` objects.
It's safe to call :meth:`sub_paths` on any path-type:
:term:`Single-Path`, :term:`Multi-Path` and :term:`Empty-Path`.
"""
def append_sub_path() -> None:
s: Self = self.__class__(None)
s._vertices = vertices[vtx_start_index : vtx_index + 1] # .copy() ?
s._commands = commands[cmd_start_index:cmd_index] # .copy() ?
sub_paths.append(s)
commands = self._commands
if len(commands) == 0:
return []
if CMD_MOVE_TO not in commands:
return [self]
sub_paths: list[Self] = []
vertices = self._vertices
vtx_start_index = 0
vtx_index = 0
cmd_start_index = 0
cmd_index = 0
for cmd in commands:
if cmd == CMD_LINE_TO:
vtx_index += 1
elif cmd == CMD_CURVE3_TO:
vtx_index += 2
elif cmd == CMD_CURVE4_TO:
vtx_index += 3
elif cmd == CMD_MOVE_TO:
append_sub_path()
# MOVE_TO target vertex is the start vertex of the following path.
vtx_index += 1
vtx_start_index = vtx_index
cmd_start_index = cmd_index + 1
cmd_index += 1
if commands[-1] != CMD_MOVE_TO:
append_sub_path()
return sub_paths
def has_clockwise_orientation(self) -> bool:
"""Returns ``True`` if 2D path has clockwise orientation.
Raises:
TypeError: can't detect orientation of a :term:`Multi-Path` object
"""
if self.has_sub_paths:
raise TypeError("can't detect orientation of a multi-path object")
if np_support is None:
return has_clockwise_orientation(self.vertices())
else:
return np_support.has_clockwise_orientation(self._vertices)
def reverse(self) -> Self:
"""Reverse path orientation inplace."""
commands = self._commands
if not len(self._commands):
return self
if commands[-1] == CMD_MOVE_TO:
# The last move_to will become the first move_to.
# A move_to as first command just moves the start point and can be
# removed!
# There are never two consecutive MOVE_TO commands in a Path!
self._commands = np.flip(commands[:-1]).copy()
self._vertices = np.flip(self._vertices[:-1, ...], axis=0).copy()
else:
self._commands = np.flip(commands).copy()
self._vertices = np.flip(self._vertices, axis=0).copy()
return self
def clockwise(self) -> Self:
"""Apply clockwise orientation inplace.
Raises:
TypeError: can't detect orientation of a :term:`Multi-Path` object
"""
if not self.has_clockwise_orientation():
self.reverse()
return self
def counter_clockwise(self) -> Self:
"""Apply counter-clockwise orientation inplace.
Raises:
TypeError: can't detect orientation of a :term:`Multi-Path` object
"""
if self.has_clockwise_orientation():
self.reverse()
return self
def flattening(self, distance: float, segments: int = 4) -> Iterator[Vec2]:
"""Flatten path to vertices as :class:`Vec2` instances."""
if not len(self._commands):
return
vertices = self.vertices()
start = vertices[0]
yield start
index = 1
for cmd in self._commands:
if cmd == CMD_LINE_TO or cmd == CMD_MOVE_TO:
end_location = vertices[index]
index += 1
yield end_location
elif cmd == CMD_CURVE3_TO:
ctrl, end_location = vertices[index : index + 2]
index += 2
pts = Vec2.generate(
Bezier3P((start, ctrl, end_location)).flattening(distance, segments)
)
next(pts) # skip first vertex
yield from pts
elif cmd == CMD_CURVE4_TO:
ctrl1, ctrl2, end_location = vertices[index : index + 3]
index += 3
pts = Vec2.generate(
Bezier4P((start, ctrl1, ctrl2, end_location)).flattening(
distance, segments
)
)
next(pts) # skip first vertex
yield from pts
else:
raise ValueError(f"Invalid command: {cmd}")
start = end_location
# Appending single commands (line_to, move_to, curve3_to, curve4_to) is not
# efficient, because numpy arrays do not grow dynamically, they are reallocated for
# every single command!
# Construct paths as ezdxf.path.Path and convert them to NumpyPath2d.
# Concatenation of NumpyPath2d objects is faster than extending Path objects
def extend(self, paths: Sequence[NumpyPath2d]) -> None:
"""Extend an existing path by appending additional paths. The paths are
connected by MOVE_TO commands if the end- and start point of sequential paths
are not coincident (multi-path).
"""
if not len(paths):
return
if not len(self._commands):
first = paths[0]
paths = paths[1:]
else:
first = self
vertices: list[np.ndarray] = [first._vertices]
commands: list[np.ndarray] = [first._commands]
end: Vec2 = first.end
for next_path in paths:
if len(next_path._commands) == 0:
continue
if not end.isclose(next_path.start):
commands.append(np.array((CMD_MOVE_TO,), dtype=CommandNumpyType))
vertices.append(next_path._vertices)
else:
vertices.append(next_path._vertices[1:])
end = next_path.end
commands.append(next_path._commands)
self._vertices = np.concatenate(vertices, axis=0)
self._commands = np.concatenate(commands)
@staticmethod
def concatenate(paths: Sequence[NumpyPath2d]) -> NumpyPath2d:
"""Returns a new path of concatenated paths. The paths are connected by
MOVE_TO commands if the end- and start point of sequential paths are not
coincident (multi-path).
"""
if not paths:
return NumpyPath2d(None)
first = paths[0].clone()
first.extend(paths[1:])
return first
def to_qpainter_path(paths: Iterable[NumpyPath2d]):
"""Convert the given `paths` into a single :class:`QPainterPath`."""
from ezdxf.addons.xqt import QPainterPath, QPointF
paths = list(paths)
if len(paths) == 0:
raise ValueError("one or more paths required")
qpath = QPainterPath()
for path in paths:
points = [QPointF(v.x, v.y) for v in path.vertices()]
qpath.moveTo(points[0])
index = 1
for cmd in path.command_codes():
# using Command.<attr> slows down this function by a factor of 4!!!
if cmd == CMD_LINE_TO:
qpath.lineTo(points[index])
index += 1
elif cmd == CMD_CURVE3_TO:
qpath.quadTo(points[index], points[index + 1])
index += 2
elif cmd == CMD_CURVE4_TO:
qpath.cubicTo(points[index], points[index + 1], points[index + 2])
index += 3
elif cmd == CMD_MOVE_TO:
qpath.moveTo(points[index])
index += 1
return qpath
MPL_MOVETO = 1
MPL_LINETO = 2
MPL_CURVE3 = 3
MPL_CURVE4 = 4
MPL_CODES = [
(0,), # dummy
(MPL_LINETO,),
(MPL_CURVE3, MPL_CURVE3),
(MPL_CURVE4, MPL_CURVE4, MPL_CURVE4),
(MPL_MOVETO,),
]
def to_matplotlib_path(paths: Iterable[NumpyPath2d]):
"""Convert the given `paths` into a single :class:`matplotlib.path.Path`."""
from matplotlib.path import Path
paths = list(paths)
if len(paths) == 0:
raise ValueError("one or more paths required")
vertices: list[np.ndarray] = []
codes: list[int] = []
for path in paths:
vertices.append(path.np_vertices())
codes.append(MPL_MOVETO)
for cmd in path.command_codes():
codes.extend(MPL_CODES[cmd])
return Path(np.concatenate(vertices), codes)
def single_paths(paths: Iterable[NumpyPath2d]) -> list[NumpyPath2d]:
single_paths_: list[NumpyPath2d] = []
for p in paths:
sub_paths = p.sub_paths()
if sub_paths:
single_paths_.extend(sub_paths)
return single_paths_
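if __name__ == "__main__":
    # Usage sketch (illustrative only): build paths with ezdxf.path.Path, convert
    # them to the compact NumpyPath2d representation, then concatenate, transform
    # and flatten them. Everything used here is defined above or imported at the
    # top of this module; the coordinates are arbitrary example values.
    p1 = Path((0, 0))
    p1.line_to((50, 70))
    p2 = Path((100, 100))
    p2.line_to((150, 170))
    np1 = NumpyPath2d(p1)
    np2 = NumpyPath2d(p2)
    # Concatenating NumpyPath2d objects is cheaper than extending Path objects;
    # a MOVE_TO command is inserted because the end of np1 is not the start of np2.
    combined = NumpyPath2d.concatenate([np1, np2])
    combined.transform_inplace(Matrix44.translate(10, 10, 0))
    print(combined.has_sub_paths)  # True: the MOVE_TO makes it a multi-path
    print(list(combined.flattening(distance=0.1)))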
|
0a3730ed382637349efc469af54fb786a89eb51e
|
1bb42bac177fb4e979faa441363c27cb636a43aa
|
/reconstruction/stackoverflow/models.py
|
f7af71d6748e909d225f78f401c5bd5937b6136f
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
google-research/federated
|
a6040e80fa0fbf533e0d665c66a9bc549d208b3d
|
329e60fa56b87f691303638ceb9dfa1fc5083953
|
refs/heads/master
| 2023-08-28T13:10:10.885505
| 2023-08-22T23:06:08
| 2023-08-22T23:06:40
| 295,559,343
| 595
| 187
|
Apache-2.0
| 2022-05-12T08:42:53
| 2020-09-14T23:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 7,999
|
py
|
models.py
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stackoverflow reconstruction models."""
import collections
import tensorflow as tf
from reconstruction import keras_utils
from reconstruction import reconstruction_model
class GlobalEmbedding(tf.keras.layers.Layer):
"""A custom Keras Embedding layer used for the global embeddings.
The `GlobalEmbedding`s correspond to embeddings with input dimension size
vocabulary_size + 3 (pad/bos/eos). The updates to these embeddings are sent
to the server.
"""
def __init__(
self,
total_vocab_size: int,
embedding_dim: int,
mask_zero: bool = True,
initializer: tf.keras.initializers = tf.keras.initializers.random_uniform, # pytype: disable=invalid-annotation # typed-keras
**kwargs):
super(GlobalEmbedding, self).__init__(**kwargs)
self.total_vocab_size = total_vocab_size
self.embedding_dim = embedding_dim
self.mask_zero = mask_zero
self.initializer = initializer
def build(self, input_shape):
self.embeddings = self.add_weight(
shape=(self.total_vocab_size, self.embedding_dim),
initializer=self.initializer,
name='global_embedding',
)
def call(self, inputs):
embedding_inputs = tf.where(inputs < self.total_vocab_size, inputs,
tf.zeros_like(input=inputs))
embeddings = tf.nn.embedding_lookup(self.embeddings, embedding_inputs)
return tf.where(
tf.expand_dims(inputs < self.total_vocab_size, axis=-1), embeddings,
tf.zeros_like(input=embeddings))
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return tf.not_equal(inputs, 0)
class LocalEmbedding(tf.keras.layers.Layer):
"""A custom Keras Embedding layer used for the local embeddings.
The `LocalEmbedding`s correspond to embeddings whose input dimension is the
number of out-of-vocabulary buckets.
These embeddings are reconstructed locally at the beginning of every round,
and their updates never leave the device.
"""
def __init__(
self,
input_dim: int,
embedding_dim: int,
total_vocab_size: int,
mask_zero: bool = True,
initializer: tf.keras.initializers = tf.keras.initializers.random_uniform, # pytype: disable=invalid-annotation # typed-keras
**kwargs):
super(LocalEmbedding, self).__init__(**kwargs)
self.input_dim = input_dim
self.embedding_dim = embedding_dim
self.mask_zero = mask_zero
self.total_vocab_size = total_vocab_size
self.initializer = initializer
def build(self, input_shape):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.embedding_dim),
initializer=self.initializer,
name='local_embedding',
)
def call(self, inputs):
embedding_inputs = tf.where(inputs >= self.total_vocab_size,
inputs - self.total_vocab_size,
tf.zeros_like(input=inputs))
embeddings = tf.nn.embedding_lookup(self.embeddings, embedding_inputs)
return tf.where(
tf.expand_dims(inputs >= self.total_vocab_size, axis=-1), embeddings,
tf.zeros_like(input=embeddings))
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return tf.not_equal(inputs, 0)
def create_recurrent_reconstruction_model(
vocab_size: int = 10000,
num_oov_buckets: int = 1,
embedding_size: int = 96,
latent_size: int = 670,
num_layers: int = 1,
input_spec=None,
global_variables_only: bool = False,
name: str = 'rnn_recon_embeddings',
) -> reconstruction_model.ReconstructionModel:
"""Creates a recurrent model with a partially reconstructed embedding layer.
Constructs a recurrent model for next word prediction, with the embedding
layer divided into two parts:
- A global_embedding, which shares its parameter updates with the server.
- A locally reconstructed local_embedding layer, reconstructed at the
beginning of every round, that never leaves the device. This local
embedding layer corresponds to the out of vocabulary buckets.
Args:
vocab_size: Size of vocabulary to use.
num_oov_buckets: Number of out of vocabulary buckets.
embedding_size: The size of the embedding.
latent_size: The size of the recurrent state.
num_layers: The number of layers.
input_spec: A structure of `tf.TensorSpec`s specifying the type of arguments
the model expects. Notice this must be a compound structure of two
elements, specifying both the data fed into the model to generate
predictions, as its first element, as well as the expected type of the
ground truth as its second.
global_variables_only: If True, the returned `ReconstructionModel` contains
all model variables as global variables. This can be useful for
baselines involving aggregating all variables.
name: (Optional) string to name the returned `tf.keras.Model`.
Returns:
`ReconstructionModel` tracking global and local variables for a recurrent
model.
"""
if vocab_size < 0:
raise ValueError('The vocab_size is expected to be greater than, or equal '
'to 0. Got {}'.format(vocab_size))
if num_oov_buckets <= 0:
raise ValueError('The number of out of vocabulary buckets is expected to '
'be greater than 0. Got {}'.format(num_oov_buckets))
global_layers = []
local_layers = []
total_vocab_size = vocab_size + 3 # pad/bos/eos.
extended_vocab_size = total_vocab_size + num_oov_buckets # pad/bos/eos + oov.
inputs = tf.keras.layers.Input(shape=(None,), dtype=tf.int64)
global_embedding = GlobalEmbedding(
total_vocab_size=total_vocab_size,
embedding_dim=embedding_size,
mask_zero=True,
name='global_embedding_layer')
global_layers.append(global_embedding)
local_embedding = LocalEmbedding(
input_dim=num_oov_buckets,
embedding_dim=embedding_size,
total_vocab_size=total_vocab_size,
mask_zero=True,
name='local_embedding_layer')
local_layers.append(local_embedding)
projected = tf.keras.layers.Add()(
[global_embedding(inputs),
local_embedding(inputs)])
for i in range(num_layers):
layer = tf.keras.layers.LSTM(
latent_size, return_sequences=True, name='lstm_' + str(i))
global_layers.append(layer)
processed = layer(projected)
# A projection changes dimension from rnn_layer_size to
# input_embedding_size.
projection = tf.keras.layers.Dense(
embedding_size, name='projection_' + str(i))
global_layers.append(projection)
projected = projection(processed)
# We predict the OOV tokens as part of the output vocabulary.
last_layer = tf.keras.layers.Dense(
extended_vocab_size, activation=None, name='last_layer')
global_layers.append(last_layer)
logits = last_layer(projected)
model = tf.keras.Model(inputs=inputs, outputs=logits, name=name)
if input_spec is None:
input_spec = collections.OrderedDict(
x=tf.TensorSpec(shape=(None,), dtype=tf.int64),
y=tf.TensorSpec(shape=(None,), dtype=tf.int64))
# Merge local layers into global layers if needed.
if global_variables_only:
global_layers.extend(local_layers)
local_layers = []
return keras_utils.from_keras_model(
keras_model=model,
global_layers=global_layers,
local_layers=local_layers,
input_spec=input_spec)
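if __name__ == '__main__':
  # Illustrative sketch: shows how token ids are routed between the global and
  # the local (out-of-vocabulary) embedding tables. Ids below total_vocab_size
  # hit the global table, ids at or above it hit the local table, and the two
  # outputs are summed exactly like the Add() layer used in the model above.
  # The sizes and ids below are arbitrary example values.
  total_vocab_size = 13  # e.g. a 10 word vocabulary + pad/bos/eos
  global_emb = GlobalEmbedding(
      total_vocab_size=total_vocab_size, embedding_dim=4)
  local_emb = LocalEmbedding(
      input_dim=2, embedding_dim=4, total_vocab_size=total_vocab_size)
  token_ids = tf.constant([[1, 5, 13, 14]], dtype=tf.int64)  # 13 and 14 are OOV ids
  combined = global_emb(token_ids) + local_emb(token_ids)
  print(combined.shape)  # (1, 4, 4)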
|
74461071acae1202af15d7051383f03438adf298
|
23652304566b1869ca65b95b116ee43d16e134f3
|
/tests/h/util/document_claims_test.py
|
d8e86ed54b8e790e78691484c863638a8f94cb2d
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
hypothesis/h
|
29399a26990856c336b05022e827541dd8aeedab
|
232446d776fdb906d2fb253cf0a409c6813a08d6
|
refs/heads/main
| 2023-08-30T16:21:33.754658
| 2023-08-30T09:26:50
| 2023-08-30T09:40:48
| 3,910,945
| 2,558
| 452
|
BSD-2-Clause
| 2023-09-14T11:25:06
| 2012-04-02T19:56:59
|
Python
|
UTF-8
|
Python
| false
| false
| 28,384
|
py
|
document_claims_test.py
|
import re
import pytest
from h_matchers import Any
from h.util import document_claims
from h.util.document_claims import doi_uri_from_string
class TestDocumentURIsFromLinks:
def test_it_ignores_href_links_that_match_the_claimant_uri(self):
"""
Links containing only the claimant URI should be ignored.
If document.link contains a link dict with just an "href" and no other
keys, and the value of the "href" key is the same as the claimant URI,
then this link dict should be ignored and not produce an additional
document URI dict in the output (since the document URI that it would
generate would be the same as the "self-claim" claimant URI one that is
always generated anyway).
"""
claimant = "http://localhost:5000/docs/help"
link_dicts = [{"href": claimant}]
document_uris = document_claims.document_uris_from_links(link_dicts, claimant)
assert not document_uris
def test_it_ignores_doi_links(self):
"""
Links containing only an href that starts with doi should be ignored.
If document.link contains a link dict with just an "href" and no other
keys, and the value of the "href" key begins with "doi:", then the link
dict should be ignored and not produce a document URI dict in the
output.
This is because document URI dicts for doi: URIs are generated
separately from other metadata in the document dict outside of the
"link" list.
"""
link_dicts = [{"href": "doi:10.3389/fenvs.2014.00003"}]
document_uris = document_claims.document_uris_from_links(
link_dicts, claimant="http://localhost:5000/docs/help"
)
assert not document_uris
def test_it_ignores_highwire_pdf_links(self):
pdf_url = "http://example.com/example.pdf"
link_dicts = [{"href": pdf_url, "type": "application/pdf"}]
document_uris = document_claims.document_uris_from_links(
link_dicts, claimant="http://localhost:5000/docs/help"
)
assert not document_uris
def test_it_returns_rel_alternate_document_uris_for_rel_alternate_links(self):
alternate_url = "http://example.com/alternate"
link_dicts = [{"href": alternate_url, "rel": "alternate"}]
document_uris = document_claims.document_uris_from_links(
link_dicts, claimant="http://localhost:5000/docs/help"
)
alternate_document_uri = one(
[d for d in document_uris if d["type"] == "rel-alternate"]
)
assert alternate_document_uri == {
"type": "rel-alternate",
"claimant": "http://localhost:5000/docs/help",
"content_type": "",
"uri": alternate_url,
}
def test_it_uses_link_types_as_document_uri_content_types(self):
"""
Link types get converted to document URI content_types.
The value of the 'type' key in link dicts ends up as the value of the
'content_type' key in the returned document URI dicts.
"""
link_dicts = [{"href": "http://example.com/example.html", "type": "text/html"}]
document_uris = document_claims.document_uris_from_links(
link_dicts, claimant="http://example.com/example.html"
)
assert one([d for d in document_uris if d.get("content_type") == "text/html"])
def test_it_returns_multiple_document_URI_dicts(self):
"""If there are multiple claims it should return multiple dicts."""
link_dicts = [
{"href": "http://example.com/example.html", "type": "text/html"},
{"href": "http://example.com/alternate.html", "rel": "alternate"},
{"href": "http://example.com/example2.html", "type": "text/html"},
]
document_uris = document_claims.document_uris_from_links(
link_dicts, claimant="http://example.com/claimant.html"
)
assert len(document_uris) == 3
class TestDocumentMetasFromData:
@pytest.mark.parametrize(
"input_,output",
[
# String values get turned into length 1 lists.
({"foo": "string"}, {"type": "foo", "value": ["string"]}),
# List values get copied over unchanged.
({"foo": ["one", "two"]}, {"type": "foo", "value": ["one", "two"]}),
# Sub-dicts get flattened using a '.' separator in the key,
# and length 1 list values in sub-dicts get copied over unchanged.
(
{"facebook": {"description": ["document description"]}},
{"type": "facebook.description", "value": ["document description"]},
),
# Length >1 list values in sub-dicts get copied over unchanged.
(
{
"facebook": {
"image": [
"http://example.com/image1.png",
"http://example.com/image2.png",
"http://example.com/image3.jpeg",
]
}
},
{
"type": "facebook.image",
"value": [
"http://example.com/image1.png",
"http://example.com/image2.png",
"http://example.com/image3.jpeg",
],
},
),
# String values in sub-dicts get turned into length 1 lists.
({"foo": {"bar": "string"}}, {"type": "foo.bar", "value": ["string"]}),
# Leading and trailing whitespace gets stripped from document titles.
(
{
"title": [
" My Document",
"My Document ",
" My Document ",
"\nMy Document\n\n",
"\rMy Document\r\n",
"\tMy Document \t \t ",
]
},
{
"type": "title",
"value": [
"My Document",
"My Document",
"My Document",
"My Document",
"My Document",
"My Document",
],
},
),
# Leading and trailing whitespace does not get stripped from non-titles.
(
{
"foo": [
" My Document",
"My Document ",
" My Document ",
"\nMy Document\n\n",
"\rMy Document\r\n",
"\tMy Document \t \t ",
]
},
{
"type": "foo",
"value": [
" My Document",
"My Document ",
" My Document ",
"\nMy Document\n\n",
"\rMy Document\r\n",
"\tMy Document \t \t ",
],
},
),
],
)
def test_document_metas_from_data(self, input_, output):
claimant = "http://example.com/claimant/"
document_metas = document_claims.document_metas_from_data(
document_data=input_, claimant=claimant
)
assert document_metas == [
{"type": output["type"], "value": output["value"], "claimant": claimant}
]
def test_document_metas_from_data_ignores_links_list(self):
"""It should ignore the "link" list in the document_data."""
document_data = {"link": [{"href": "http://example.com/link"}]}
document_metas = document_claims.document_metas_from_data(
document_data, "http://example/claimant"
)
assert not document_metas
def test_document_metas_from_data_with_multiple_metadata_claims(self):
"""
It should create one DocumentMeta for each metadata claim.
If document_data contains multiple metadata claims it should init one
DocumentMeta for each claim.
"""
claimant = "http://example/claimant"
document_data = {
"title": "the title",
"description": "the description",
"site_title": "the site title",
}
document_metas = document_claims.document_metas_from_data(
document_data, claimant
)
assert len(document_metas) == len(document_data.items())
for key, value in document_data.items():
assert {
"type": key,
"value": [value],
"claimant": claimant,
} in document_metas
def test_document_metas_from_data_ignores_null_titles(self):
"""It should ignore null document titles."""
for title in (None, [None, None]):
document_data = {"title": title}
document_metas = document_claims.document_metas_from_data(
document_data, "http://example/claimant"
)
assert not document_metas
def test_document_metas_from_data_allows_null_non_titles(self):
"""Null values are allowed if 'type' isn't 'title'."""
for value in (None, [None, None]):
document_data = {"foo": value}
document_metas = document_claims.document_metas_from_data(
document_data, "http://example/claimant"
)
if not isinstance(value, list):
# We expect it to turn non-lists into length-1 lists.
value = [value]
assert document_metas == [
{"type": "foo", "value": value, "claimant": "http://example/claimant"}
]
def test_document_metas_from_data_ignores_empty_string_titles(self):
"""It should ignore empty document titles."""
for title in ("", ["", ""]):
document_data = {"title": title}
document_metas = document_claims.document_metas_from_data(
document_data, "http://example/claimant"
)
assert not document_metas
def test_document_metas_from_data_allows_empty_string_non_titles(self):
"""Empty strings are allowed if 'type' isn't 'title'."""
for value in ("", ["", ""]):
document_data = {"foo": value}
document_metas = document_claims.document_metas_from_data(
document_data, "http://example/claimant"
)
if not isinstance(value, list):
# We expect it to turn non-lists into length-1 lists.
value = [value]
assert document_metas == [
{"type": "foo", "value": value, "claimant": "http://example/claimant"}
]
def test_document_metas_from_data_ignores_whitespace_only_titles(self):
"""It should ignore whitespace-only document titles."""
for title in (" ", [" ", " "], "\n\n \n"):
document_data = {"title": title}
document_metas = document_claims.document_metas_from_data(
document_data, "http://example/claimant"
)
assert not document_metas
def test_document_metas_from_data_allows_whitespace_only_non_titles(self):
"""Whitespace-only strings are allowed if 'type' isn't 'title'."""
for value in (" ", [" ", " "], "\n\n \n"):
document_data = {"foo": value}
document_metas = document_claims.document_metas_from_data(
document_data, "http://example/claimant"
)
if not isinstance(value, list):
# We expect it to turn non-lists into length-1 lists.
value = [value]
assert document_metas == [
{"type": "foo", "value": value, "claimant": "http://example/claimant"}
]
class TestDocumentURIsFromHighwirePDF:
def test_highwire_pdf_values_produce_highwire_pdf_document_uris(self):
highwire_dict = {
"pdf_url": [
"http://example.com/1.pdf",
"http://example.com/2.pdf",
"http://example.com/3.pdf",
]
}
document_uris = document_claims.document_uris_from_highwire_pdf(
highwire_dict, claimant="http://example.com/example.html"
)
for pdf in highwire_dict["pdf_url"]:
document_uri = one([d for d in document_uris if d.get("uri") == pdf])
assert document_uri == {
"claimant": "http://example.com/example.html",
"uri": pdf,
"type": "highwire-pdf",
"content_type": "application/pdf",
}
class TestDOIURIFromString:
@pytest.mark.parametrize("doi", ["10.1001/1234", "doi:10.1001/1234"])
def test_it_prepends_doi_prefix(self, doi):
assert doi_uri_from_string(doi) == f"doi:{strip_prefix('doi:', doi)}"
@pytest.mark.parametrize(
"url",
[
"http://doi.org/10.1234/5678",
"https://doi.org/10.1234/5678",
"http://dx.doi.org/10.1234/5678",
"https://dx.doi.org/10.1234/5678",
],
)
def test_it_allows_doi_urls(self, url):
# Many sites store DOI URLs rather than just identifiers in DOI fields.
# We should ideally normalize the different forms, but for now we just
# continue to accept them.
assert doi_uri_from_string(url) == f"doi:{url}"
@pytest.mark.parametrize(
"doi",
[
# Empty
"doi:",
"",
# Whitespace only
"doi: ",
" ",
# Strings that do not match the DOI syntax.
"9.1234/567",
"chapter1/section1",
"1234.5678",
"10.0.0.1",
"10.0/1234",
# Non-DOI URLs
"https://publisher.org/foo.html",
],
)
def test_it_returns_none_if_invalid(self, doi):
assert doi_uri_from_string(doi) is None
@pytest.mark.parametrize("doi", [" doi: 10.1234/5678"])
def test_it_strips_whitespace(self, doi):
assert doi_uri_from_string(doi) == re.sub("\\s+", "", doi)
class TestDocumentURIsFromHighwireDOI:
def test_highwire_doi_values_produce_highwire_doi_document_uris(self):
highwire_dict = {
"doi": [
"doi:10.1038/nphys1170",
"doi:10.1002/0470841559.ch1",
"doi:10.1594/PANGAEA.726855",
]
}
document_uris = document_claims.document_uris_from_highwire_doi(
highwire_dict, claimant="http://example.com/example.html"
)
for doi in highwire_dict["doi"]:
document_uri = one([d for d in document_uris if d.get("uri") == doi])
assert document_uri == {
"claimant": "http://example.com/example.html",
"uri": doi,
"type": "highwire-doi",
"content_type": "",
}
def test_it_ignores_invalid_dois(self):
"""If `doi_uri_from_string` returns `None`, the identifier is ignored."""
highwire_dict = {"doi": ["doi:"]}
document_uris = document_claims.document_uris_from_highwire_doi(
highwire_dict, claimant="http://example.com/example.html"
)
assert not document_uris
class TestDocumentURIsFromDC:
def test_dc_identifiers_produce_dc_doi_document_uris(self):
"""Each 'identifier' list item in the 'dc' dict becomes a doc URI."""
dc_dict = {
"identifier": [
"doi:10.1038/nphys1170",
"doi:10.1002/0470841559.ch1",
"doi:10.1594/PANGAEA.726855",
]
}
document_uris = document_claims.document_uris_from_dc(
dc_dict, claimant="http://example.com/example.html"
)
for doi in dc_dict["identifier"]:
document_uri = one([d for d in document_uris if d.get("uri") == doi])
assert document_uri == {
"claimant": "http://example.com/example.html",
"uri": doi,
"type": "dc-doi",
"content_type": "",
}
def test_it_ignores_invalid_dois(self):
"""If `doi_uri_from_string` returns `None`, the identifier is ignored."""
dc_dict = {"identifier": ["doi:"]}
document_uris = document_claims.document_uris_from_dc(
dc_dict, claimant="http://example.com/example.html"
)
assert not document_uris
class TestDocumentURISelfClaim:
def test_document_uri_self_claim(self):
claimant = "http://localhost:5000/docs/help"
document_uri = document_claims.document_uri_self_claim(claimant)
assert document_uri == {
"claimant": claimant,
"uri": claimant,
"type": "self-claim",
"content_type": "",
}
@pytest.mark.usefixtures(
"document_uris_from_dc",
"document_uris_from_highwire_doi",
"document_uris_from_highwire_pdf",
"document_uris_from_links",
"document_uri_self_claim",
)
class TestDocumentURIsFromData:
def test_it_gets_document_uris_from_links(self, document_uris_from_links):
document_data = {
"link": [
# In production these would be link dicts not strings.
"link_dict_1",
"link_dict_2",
"link_dict_3",
]
}
claimant = "http://localhost:5000/docs/help"
document_uris_from_links.return_value = [
{"uri": "uri_1"},
{"uri": "uri_2"},
{"uri": "uri_3"},
]
document_uris = document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_links.assert_called_once_with(
document_data["link"], claimant
)
for document_uri in document_uris_from_links.return_value:
assert document_uri in document_uris
def test_calling_document_uris_from_links_when_no_links(
self, document_uris_from_links
):
document_data = {} # No 'link' key.
claimant = "http://localhost:5000/docs/help"
document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_links.assert_called_once_with([], claimant)
def test_it_gets_documents_uris_from_highwire_pdf(
self, document_uris_from_highwire_pdf
):
document_data = {"highwire": {"pdf": ["pdf_1", "pdf_2", "pdf_3"]}}
claimant = "http://localhost:5000/docs/help"
document_uris_from_highwire_pdf.return_value = [
{"uri": "uri_1"},
{"uri": "uri_2"},
{"uri": "uri_3"},
]
document_uris = document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_highwire_pdf.assert_called_once_with(
document_data["highwire"], claimant
)
for document_uri in document_uris_from_highwire_pdf.return_value:
assert document_uri in document_uris
def test_calling_document_uris_from_highwire_pdf_when_no_highwire(
self, document_uris_from_highwire_pdf
):
document_data = {} # No 'highwire' key.
claimant = "http://localhost:5000/docs/help"
document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_highwire_pdf.assert_called_once_with({}, claimant)
def test_it_gets_documents_uris_from_highwire_doi(
self, document_uris_from_highwire_doi
):
document_data = {"highwire": {"doi": ["doi_1", "doi_2", "doi_3"]}}
claimant = "http://localhost:5000/docs/help"
document_uris_from_highwire_doi.return_value = [
{"uri": "uri_1"},
{"uri": "uri_2"},
{"uri": "uri_3"},
]
document_uris = document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_highwire_doi.assert_called_once_with(
document_data["highwire"], claimant
)
for document_uri in document_uris_from_highwire_doi.return_value:
assert document_uri in document_uris
def test_calling_document_uris_from_highwire_doi_when_no_highwire(
self, document_uris_from_highwire_doi
):
document_data = {} # No 'highwire' key.
claimant = "http://localhost:5000/docs/help"
document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_highwire_doi.assert_called_once_with({}, claimant)
def test_it_gets_documents_uris_from_dc(self, document_uris_from_dc):
document_data = {"dc": {"identifier": ["doi_1", "doi_2", "doi_3"]}}
claimant = "http://localhost:5000/docs/help"
document_uris_from_dc.return_value = [
{"uri": "uri_1"},
{"uri": "uri_2"},
{"uri": "uri_3"},
]
document_uris = document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_dc.assert_called_once_with(document_data["dc"], claimant)
for document_uri in document_uris_from_dc.return_value:
assert document_uri in document_uris
def test_calling_document_uris_from_dc_when_no_dc(self, document_uris_from_dc):
document_data = {} # No 'dc' key.
claimant = "http://localhost:5000/docs/help"
document_claims.document_uris_from_data(
document_data=document_data, claimant=claimant
)
document_uris_from_dc.assert_called_once_with({}, claimant)
def test_it_gets_self_claim_document_uris(self, document_uri_self_claim):
claimant = "http://example.com/claimant"
document_uris = document_claims.document_uris_from_data({}, claimant)
document_uri_self_claim.assert_called_once_with(claimant)
assert document_uri_self_claim.return_value in document_uris
def test_it_ignores_null_uris(
self,
document_uris_from_links,
document_uris_from_highwire_pdf,
document_uris_from_highwire_doi,
document_uris_from_dc,
document_uri_self_claim,
):
document_uris_from_links.return_value = [{"uri": None}]
document_uris_from_highwire_pdf.return_value = [{"uri": None}]
document_uris_from_highwire_doi.return_value = [{"uri": None}]
document_uris_from_dc.return_value = [{"uri": None}]
document_uri_self_claim.return_value = {"uri": None}
document_uris = document_claims.document_uris_from_data(
{}, "http://example.com/claimant"
)
assert document_uris == []
def test_it_ignores_empty_string_uris(
self,
document_uris_from_links,
document_uris_from_highwire_pdf,
document_uris_from_highwire_doi,
document_uris_from_dc,
document_uri_self_claim,
):
document_uris_from_links.return_value = [{"uri": ""}]
document_uris_from_highwire_pdf.return_value = [{"uri": ""}]
document_uris_from_highwire_doi.return_value = [{"uri": ""}]
document_uris_from_dc.return_value = [{"uri": ""}]
document_uri_self_claim.return_value = {"uri": ""}
document_uris = document_claims.document_uris_from_data(
{}, "http://example.com/claimant"
)
assert document_uris == []
def test_it_ignores_whitespace_only_self_claim_uris(self, document_uri_self_claim):
for uri in (" ", "\n ", "\r\n", " \t"):
document_uri_self_claim.return_value = {"uri": uri}
document_uris = document_claims.document_uris_from_data(
{}, "http://example.com/claimant"
)
assert document_uris == []
def test_it_ignores_whitespace_only_uris(
self,
document_uris_from_links,
document_uris_from_highwire_pdf,
document_uris_from_highwire_doi,
document_uris_from_dc,
document_uri_self_claim,
):
uris = [" ", "\n ", "\r\n", " \t"]
document_uris_from_links.return_value = [{"uri": u} for u in uris]
document_uris_from_highwire_pdf.return_value = [{"uri": u} for u in uris]
document_uris_from_highwire_doi.return_value = [{"uri": u} for u in uris]
document_uris_from_dc.return_value = [{"uri": u} for u in uris]
document_uris = document_claims.document_uris_from_data(
{}, "http://example.com/claimant"
)
assert document_uris == [document_uri_self_claim.return_value]
def test_it_strips_whitespace_from_uris(
self,
document_uris_from_links,
document_uris_from_highwire_pdf,
document_uris_from_highwire_doi,
document_uris_from_dc,
document_uri_self_claim,
):
document_uris_from_links.return_value = [
{"uri": " from_link_1"},
{"uri": "from_link_2 "},
{"uri": " from_link_3 "},
]
document_uris_from_highwire_pdf.return_value = [
{"uri": " highwire_1"},
{"uri": "highwire_2 "},
{"uri": " highwire_3 "},
]
document_uris_from_highwire_doi.return_value = [
{"uri": " doi_1"},
{"uri": "doi_2 "},
{"uri": " doi_3 "},
]
document_uris_from_dc.return_value = [
{"uri": " dc_1"},
{"uri": "dc_2 "},
{"uri": " dc_3 "},
]
document_uris = document_claims.document_uris_from_data(
{}, "http://example.com/claimant"
)
assert (
document_uris
== Any.list.containing(
[
{"uri": "from_link_1"},
{"uri": "from_link_2"},
{"uri": "from_link_3"},
{"uri": "highwire_1"},
{"uri": "highwire_2"},
{"uri": "highwire_3"},
{"uri": "doi_1"},
{"uri": "doi_2"},
{"uri": "doi_3"},
{"uri": "dc_1"},
{"uri": "dc_2"},
{"uri": "dc_3"},
document_uri_self_claim.return_value,
]
).only()
)
def test_it_strips_whitespace_from_self_claim_uris(
self, document_uris_from_links, document_uri_self_claim
):
for uri in (" self_claim", "self_claim ", " self_claim "):
document_uris_from_links.return_value = []
document_uri_self_claim.return_value = {"uri": uri}
document_uris = document_claims.document_uris_from_data(
{}, "http://example.com/claimant"
)
assert document_uris == [{"uri": uri.strip()}]
@pytest.fixture
def document_uris_from_dc(self, patch):
return patch("h.util.document_claims.document_uris_from_dc", return_value=[])
@pytest.fixture
def document_uris_from_highwire_pdf(self, patch):
return patch(
"h.util.document_claims.document_uris_from_highwire_pdf", return_value=[]
)
@pytest.fixture
def document_uris_from_highwire_doi(self, patch):
return patch(
"h.util.document_claims.document_uris_from_highwire_doi", return_value=[]
)
@pytest.fixture
def document_uris_from_links(self, patch):
return patch("h.util.document_claims.document_uris_from_links", return_value=[])
@pytest.fixture
def document_uri_self_claim(self, patch):
return patch("h.util.document_claims.document_uri_self_claim")
def one(list_):
assert len(list_) == 1
return list_[0]
def strip_prefix(prefix, s):
if s.startswith(prefix):
return s[len(prefix) :]
return s
|
81933784cabd59d1ebd244c554b217f54e1e9c2b
|
7a7ee4b7c551ce92483e7162b3064f30219fbddc
|
/benchmarks/randomized_svd_fbpca.py
|
71e25d2dd4578b178b17602ebbc7f94472dd69a1
|
[
"Apache-2.0"
] |
permissive
|
mratsim/Arraymancer
|
ccd8267fa2869d73a5a028ecceabf9e96dfdb69c
|
e2df3dd7509588e3a863c690389649ced790344a
|
refs/heads/master
| 2023-09-02T06:39:26.116762
| 2023-08-31T15:56:39
| 2023-08-31T15:56:39
| 88,188,361
| 1,258
| 125
|
Apache-2.0
| 2023-09-12T13:09:07
| 2017-04-13T17:10:19
|
Nim
|
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
randomized_svd_fbpca.py
|
import numpy as np
import time
from fbpca import pca
from scipy.linalg import hilbert
def bench(Observations, Features):
N = max(Observations, Features)
k = 40
    # Create a known ill-conditioned matrix for testing
# This requires 20k * 20k * 4 bytes (float32) = 1.6 GB
start = time.time()
H = hilbert(N)[:Observations, :Features]
stop = time.time()
    print(f'Hilbert matrix creation took: {stop-start:>4.4f} seconds.')
print(f'Matrix of shape: [{Observations}, {Features}]')
print(f'Target SVD: [{Observations}, {k}]')
start = time.time()
(U, S, Vh) = pca(H, k=k, raw=True, n_iter=2, l=k+5) # Raw=True for SVD
stop = time.time()
print(f'Randomized SVD took: {stop-start:>4.4f} seconds')
print("U: ", U.shape)
print("S: ", S.shape)
print("Vh: ", Vh.shape)
print("---------------------------------------------------------------------------------")
bench(Observations = 20000, Features = 4000)
bench(Observations = 4000, Features = 20000)
# i9-9980XE Overclocked at 4.1GHz, AVX 4.0GHz, AVX512 3.5GHz
# Numpy / Scipy built with OpenBLAS
#
# Hilbert matrix creation took: 0.9500 seconds.
# Matrix of shape: [20000, 4000]
# Target SVD: [20000, 40]
# Randomized SVD took: 2.1240 seconds
# U: (20000, 40)
# S: (40,)
# Vh: (40, 4000)
# ---------------------------------------------------------------------------------
# Hilbert matrix creation took: 0.9441 seconds.
# Matrix of shape: [4000, 20000]
# Target SVD: [4000, 40]
# Randomized SVD took: 0.4008 seconds
# U: (4000, 40)
# S: (40,)
# Vh: (40, 20000)
# ---------------------------------------------------------------------------------
# 4.81s, 3742.4Mb -- xtime.rb
# mem usage with just the first SVD: 3.77GB
# Numpy / Scipy built with MKL
#
# Hilbert matrix creation took: 0.9380 seconds.
# Matrix of shape: [20000, 4000]
# Target SVD: [20000, 40]
# Randomized SVD took: 2.1632 seconds
# U: (20000, 40)
# S: (40,)
# Vh: (40, 4000)
# ---------------------------------------------------------------------------------
# Hilbert matrix creation took: 0.9698 seconds.
# Matrix of shape: [4000, 20000]
# Target SVD: [4000, 40]
# Randomized SVD took: 0.3566 seconds
# U: (4000, 40)
# S: (40,)
# Vh: (40, 20000)
# ---------------------------------------------------------------------------------
# 4.78s, 3742.7Mb
|
0c514a2044e6694245a4f2d98d90195470295e10
|
0d7f11605de1c72e5927036c7fc200132e4e7abd
|
/lantern/data/data_cufflinks.py
|
802f83ae553f0f871f052074f42c38cc1aad2c49
|
[
"Apache-2.0"
] |
permissive
|
timkpaine/lantern
|
603c91604095d07b48645c5d11013ddf93631b44
|
7129fe3db104138d79ebf553ae9aedd9256c5c50
|
refs/heads/main
| 2023-08-26T14:07:03.898226
| 2022-11-22T01:33:39
| 2022-11-22T01:33:39
| 97,290,549
| 331
| 26
|
Apache-2.0
| 2023-09-04T14:01:57
| 2017-07-15T03:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,047
|
py
|
data_cufflinks.py
|
from .cfgen import scattergeo, \
choropleth, \
scatter, \
scatter3d, \
bubble, \
bubble3d, \
pie, \
heatmap, \
bars, \
ohlc, \
ohlcv, \
box, \
histogram, \
surface, \
sinwave, \
getName, \
lines
def getCFData(type, n_categories=5, n=100, **kwargs):
if type == 'scatter':
return scatter(n_categories,
n,
prefix=kwargs.get('prefix', 'category'),
mode=kwargs.get('mode', None))[['x', 'y', 'categories', 'text']]
elif type == 'scatter3d':
return scatter3d(n_categories,
n,
prefix=kwargs.get('prefix', 'category'),
mode=kwargs.get('mode', None))
elif type == 'bubble':
return bubble(n_categories,
n,
prefix=kwargs.get('prefix', 'category'),
mode=kwargs.get('mode', None))[['x', 'y', 'categories', 'size', 'text']]
elif type == 'bubble3d':
return bubble3d(n_categories,
n,
prefix=kwargs.get('prefix', 'category'),
mode=kwargs.get('mode', None))
elif type == 'pie':
        return pie(n_labels=kwargs.get('n_labels', 5),
mode=kwargs.get('mode', None))
elif type == 'heatmap':
return heatmap(n_x=kwargs.get('n_x', 5),
n_y=kwargs.get('n_y', 10))
elif type == 'bars':
return bars(n,
n_categories,
prefix=kwargs.get('prefix', 'category'),
columns=kwargs.get('columns', None),
mode=kwargs.get('mode', 'abc'))
elif type == 'ohlc':
return ohlc(n)
elif type == 'ohlcv':
return ohlcv(n)
elif type == 'box':
return box(n_traces=kwargs.get('n_traces', 5),
n=n,
mode=kwargs.get('mode', None))
elif type == 'histogram':
return histogram(n_traces=kwargs.get('n_traces', 1),
n=n,
mode=None)
elif type == 'surface':
return surface(n_x=kwargs.get('n_x', 20),
n_y=kwargs.get('n_y', 20))
elif type == 'sinwave':
return sinwave(n=n,
inc=kwargs.get('inc', .25))
    elif type == 'scattergeo':
return scattergeo()
elif type == 'choropleth':
return choropleth()
elif type == 'stock':
return getName(n=1,
name=kwargs.get('name', 3),
exchange=kwargs.get('exchange', 2),
columns=kwargs.get('columns', None),
mode=kwargs.get('mode', 'abc'))
else:
return lines(n_traces=kwargs.get('n_traces', 5),
n=n,
columns=kwargs.get('columns', None),
dateIndex=kwargs.get('dateIndex', True),
mode=kwargs.get('mode', None))
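# A minimal usage sketch (argument values below are hypothetical; the cfgen
# generators imported above are assumed to return pandas DataFrames, as the
# rest of lantern's plotting helpers expect):
#
#   scatter_df = getCFData('scatter', n_categories=3, n=50, prefix='group')
#   ohlcv_df = getCFData('ohlcv', n=200)
#   lines_df = getCFData('unknown-type')  # unmatched types fall through to lines()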
|
2763e0e07a4de9307684bc5e0687972a1d42b276
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/ops/template.py
|
4e36b6b963176d0dd5337da0b7dbda30df4d4652
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 31,941
|
py
|
template.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides templates which allow variable sharing."""
import functools
import traceback
from tensorflow.python.checkpoint import checkpoint as trackable_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.trackable import base as trackable
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
__all__ = ["make_template"]
@tf_export(v1=["make_template"])
def make_template(name_,
func_,
create_scope_now_=False,
unique_name_=None,
custom_getter_=None,
**kwargs):
"""Given an arbitrary function, wrap it so that it does variable sharing.
@compatibility(TF2)
`tf.compat.v1.make_template` is a legacy API that is only compatible
with eager execution enabled and `tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`. See the model mapping
migration guide section on `make_template` for more info:
https://www.tensorflow.org/guide/migrate/model_mapping#using_tfcompatv1make_template_in_the_decorated_method
Even if you use legacy apis for `variable_scope`-based variable reuse,
we recommend using
`tf.compat.v1.keras.utils.track_tf1_style_variables` directly and not using
`tf.compat.v1.make_template`, as it interoperates with eager execution in a
simpler and more predictable fashion than `make_template`.
The TF2 API approach would be tracking your variables using
`tf.Module`s or Keras layers and models rather than relying on
`make_template`.
@end_compatibility
This wraps `func_` in a Template and partially evaluates it. Templates are
functions that create variables the first time they are called and reuse them
thereafter. In order for `func_` to be compatible with a `Template` it must
have the following properties:
* The function should create all trainable variables and any variables that
should be reused by calling `tf.compat.v1.get_variable`. If a trainable
variable is
created using `tf.Variable`, then a ValueError will be thrown. Variables
that are intended to be locals can be created by specifying
`tf.Variable(..., trainable=false)`.
* The function may use variable scopes and other templates internally to
create and reuse variables, but it shouldn't use
`tf.compat.v1.global_variables` to
capture variables that are defined outside of the scope of the function.
* Internal scopes and variable names should not depend on any arguments that
are not supplied to `make_template`. In general you will get a ValueError
telling you that you are trying to reuse a variable that doesn't exist
if you make a mistake.
In the following example, both `z` and `w` will be scaled by the same `y`. It
is important to note that if we didn't assign `scalar_name` and used a
different name for z and w that a `ValueError` would be thrown because it
couldn't reuse the variable.
```python
def my_op(x, scalar_name):
var1 = tf.compat.v1.get_variable(scalar_name,
shape=[],
initializer=tf.compat.v1.constant_initializer(1))
return x * var1
scale_by_y = tf.compat.v1.make_template('scale_by_y', my_op, scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
```
As a safe-guard, the returned function will raise a `ValueError` after the
first call if trainable variables are created by calling `tf.Variable`.
If all of these are true, then 2 properties are enforced by the template:
1. Calling the same template multiple times will share all non-local
variables.
2. Two different templates are guaranteed to be unique, unless you reenter the
same variable scope as the initial definition of a template and redefine
it. An examples of this exception:
```python
def my_op(x, scalar_name):
var1 = tf.compat.v1.get_variable(scalar_name,
shape=[],
initializer=tf.compat.v1.constant_initializer(1))
return x * var1
with tf.compat.v1.variable_scope('scope') as vs:
scale_by_y = tf.compat.v1.make_template('scale_by_y', my_op,
scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
# Creates a template that reuses the variables above.
with tf.compat.v1.variable_scope(vs, reuse=True):
scale_by_y2 = tf.compat.v1.make_template('scale_by_y', my_op,
scalar_name='y')
z2 = scale_by_y2(input1)
w2 = scale_by_y2(input2)
```
Depending on the value of `create_scope_now_`, the full variable scope may be
captured either at the time of first call or at the time of construction. If
this option is set to True, then all Tensors created by repeated calls to the
template will have an extra trailing _N+1 to their name, as the first time the
scope is entered in the Template constructor no Tensors are created.
Note: `name_`, `func_` and `create_scope_now_` have a trailing underscore to
reduce the likelihood of collisions with kwargs.
Args:
name_: A name for the scope created by this template. If necessary, the name
will be made unique by appending `_N` to the name.
func_: The function to wrap.
create_scope_now_: Boolean controlling whether the scope should be created
when the template is constructed or when the template is called. Default
is False, meaning the scope is created when the template is called.
unique_name_: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is false,
an error is raised. Defaults to None.
custom_getter_: Optional custom getter for variables used in `func_`. See
the `tf.compat.v1.get_variable` `custom_getter` documentation for more
information.
**kwargs: Keyword arguments to apply to `func_`.
Returns:
A function to encapsulate a set of variables which should be created once
and reused. An enclosing scope will be created either when `make_template`
is called or when the result is called, depending on the value of
`create_scope_now_`. Regardless of the value, the first time the template
is called it will enter the scope with no reuse, and call `func_` to create
variables, which are guaranteed to be unique. All subsequent calls will
re-enter the scope and reuse those variables.
Raises:
ValueError: if `name_` is None.
"""
return make_template_internal(
name_,
func_,
create_scope_now_,
unique_name_,
custom_getter_,
create_graph_function_=False,
**kwargs)
def make_template_internal(name_,
func_,
create_scope_now_=False,
unique_name_=None,
custom_getter_=None,
create_graph_function_=False,
**kwargs):
"""Make a template, optionally compiling func_ into a graph function.
See `make_template` for full documentation.
Args:
name_: A name for the scope created by this template. If necessary, the name
will be made unique by appending `_N` to the name.
func_: The function to wrap.
create_scope_now_: Boolean controlling whether the scope should be created
when the template is constructed or when the template is called. Default
is False, meaning the scope is created when the template is called.
unique_name_: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is false,
an error is raised. Defaults to None. If executing eagerly, must be None.
custom_getter_: Optional custom getter for variables used in `func_`. See
the `tf.compat.v1.get_variable` `custom_getter` documentation for more
information.
create_graph_function_: When True, `func_` will be executed as a graph
function. This implies that `func_` must satisfy the properties that
`function.defun` requires of functions: See the documentation of
`function.defun` for details. When executing eagerly, setting this flag
to True can improve performance. Regardless of whether eager execution
is enabled, enabling this flag gives the caller access to graph-function
semantics, i.e., accesses to variables are totally ordered and
side-effecting ops are not pruned.
**kwargs: Keyword arguments to apply to `func_`.
Returns:
A function to encapsulate a set of variables which should be created once
and reused. An enclosing scope will be created either when `make_template`
is called or when the result is called, depending on the value of
`create_scope_now_`. Regardless of the value, the first time the template
is called it will enter the scope with no reuse, and call `func_` to create
variables, which are guaranteed to be unique. All subsequent calls will
re-enter the scope and reuse those variables.
Raises:
ValueError: if `name_` is None.
ValueError: if `unique_name_` is not None and eager execution is enabled.
"""
if kwargs:
func_ = functools.partial(func_, **kwargs)
if context.executing_eagerly():
if unique_name_ is not None:
raise ValueError(
"unique_name_ cannot be used when eager execution is enabled.")
return EagerTemplate(
name_,
func_,
create_scope_now=create_scope_now_,
custom_getter=custom_getter_,
create_graph_function=create_graph_function_)
return Template(
name_,
func_,
create_scope_now=create_scope_now_,
unique_name=unique_name_,
custom_getter=custom_getter_,
create_graph_function=create_graph_function_)
def _skip_common_stack_elements(stacktrace, base_case):
"""Skips items that the target stacktrace shares with the base stacktrace."""
for i, (trace, base) in enumerate(zip(stacktrace, base_case)):
if trace != base:
return stacktrace[i:]
return stacktrace[-1:]
class Template(trackable.Trackable):
"""Wrap a function to aid in variable sharing.
Templates are functions that create variables the first time they are called
and reuse them thereafter. See `make_template` for full documentation.
Note: By default, the full variable scope is captured at the time of first
  call. If `create_scope_now` is passed as True to the constructor, the full
  scope will be captured there, but no variables will be created until the first
call.
"""
def __init__(self,
name,
func,
create_scope_now=False,
unique_name=None,
custom_getter=None,
create_graph_function=False):
"""Creates a template for the given function.
Args:
name: A name for the scope created by this template. The name will be made
        unique by appending `_N` to it (see how
`tf.compat.v1.variable_scope` treats the `default_name` for details).
func: The function to apply each time.
create_scope_now: Whether to create the scope at Template construction
time, rather than first call. Defaults to false. Creating the scope at
        construction time may be more convenient if the template is to be passed
through much lower level code, and you want to be sure of the scope name
without knowing exactly where it will be first called. If set to True,
the scope will be created in the constructor, and all subsequent times
in `__call__`, leading to a trailing numeral being added to the names of
all created Tensors. If set to False, the scope will be created at the
first call location.
unique_name: When used, it overrides `name` and is not made unique. If a
template of the same scope/unique_name already exists and reuse is
false, an error is raised. Defaults to None.
custom_getter: optional custom getter to pass to `variable_scope()`
create_graph_function: When True, `func` will be executed as a graph
function. Enabling this flag gives the caller access to graph-function
semantics, i.e., accesses to variables are totally ordered and
side-effecting ops are not pruned.
Raises:
ValueError: if `name` is None.
"""
if create_graph_function:
self._func = def_function.function(func)
else:
self._func = func
self._stacktrace = traceback.format_stack()[:-2]
self._name = name
self._unique_name = unique_name
self._custom_getter = custom_getter
if name is None:
raise ValueError("name cannot be None.")
if create_scope_now:
with variable_scope._pure_variable_scope( # pylint:disable=protected-access
(self._unique_name or
variable_scope._get_unique_variable_scope(self._name)), # pylint:disable=protected-access
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
else:
self._variable_scope = None
# This variable keeps track of whether the template has been called to
# completion, which is not the same as whether the scope has been created.
self._variables_created = False
# `MirroredStrategy` builds the graph with multiple threads. If a
# `merge_call` happens within a template, multiple calls may be in progress
# simultaneously. This variable keeps track of whether any call of the
# template has started.
self._first_call = True
def _call_func(self, args, kwargs):
try:
if self._variables_created:
vars_at_start = len(
ops.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES))
trainable_at_start = len(
ops.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES))
result = self._func(*args, **kwargs)
# Variables were previously created, implying this is not the first
# time the template has been called. Check to make sure that no new
# trainable variables were created this time around.
trainable_variables = ops.get_collection_ref(
ops.GraphKeys.TRAINABLE_VARIABLES)
# If a variable that we intend to train is created as a side effect
# of creating a template, then that is almost certainly an error.
if trainable_at_start != len(trainable_variables):
raise ValueError("Trainable variable created when calling a template "
"after the first time, perhaps you used tf.Variable "
"when you meant tf.get_variable: %s" %
(trainable_variables[trainable_at_start:],))
# Non-trainable tracking variables are a legitimate reason why a new
# variable would be created, but it is a relatively advanced use-case,
# so log it.
variables = ops.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES)
if vars_at_start != len(variables):
logging.info(
"New variables created when calling a template after "
"the first time, perhaps you used tf.Variable when you "
"meant tf.get_variable: %s", variables[vars_at_start:])
elif self._first_call:
self._first_call = False
try:
# The first time we run, restore variables if necessary (via
# Trackable).
with trackable_util.capture_dependencies(template=self):
result = self._func(*args, **kwargs)
except:
self._first_call = True
raise
self._variables_created = True
else: # We are calling the template in parallel from another thread.
result = self._func(*args, **kwargs)
return result
except Exception as exc:
# Reraise the exception, but append the original definition to the
# trace.
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
trace = "".join(
_skip_common_stack_elements(self._stacktrace,
traceback.format_stack()))
arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace)
new_args = [arg0]
new_args.extend(args[1:])
exc.args = tuple(new_args)
raise
def __call__(self, *args, **kwargs):
if self._variable_scope:
# Only reuse variables if not on first call.
with variable_scope.variable_scope(
self._variable_scope, reuse=not self._first_call):
return self._call_func(args, kwargs)
else:
# The scope was not created at construction time, so create it here.
# Subsequent calls should reuse variables.
with variable_scope.variable_scope(
self._unique_name, self._name,
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
return self._call_func(args, kwargs)
@property
def name(self):
"""Returns the name given to this Template."""
return self._name
@property
def func(self):
"""Returns the func given to this Template."""
return self._func
@property
def variable_scope(self):
"""Returns the variable scope object created by this Template."""
return self._variable_scope
@property
def variable_scope_name(self):
"""Returns the variable scope name created by this Template."""
if self._variable_scope:
name = self._variable_scope.name
if not name or name[-1] == "/":
return name
else:
# To prevent partial matches on the scope_name, we add '/' at the end.
return name + "/"
@property
def variables(self):
"""Returns the list of global and local variables created by the Template."""
return self.global_variables + self.local_variables
@property
def trainable_variables(self):
"""Returns the list of trainable variables created by the Template."""
if self._variables_created:
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
self.variable_scope_name)
else:
return []
@property
def non_trainable_variables(self):
"""Returns the list of non-trainable variables created by the Template."""
# TODO(apassos) Make sure it matches Eager when using local variables.
global_variables = self.global_variables
trainable_variables = set(self.trainable_variables)
return [x for x in global_variables if x not in trainable_variables]
@property
def global_variables(self):
"""Returns the list of global variables created by the Template."""
if self._variables_created:
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
self.variable_scope_name)
else:
return []
@property
def local_variables(self):
"""Returns the list of global variables created by the Template."""
if self._variables_created:
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES,
self.variable_scope_name)
else:
return []
@property
def weights(self):
"""List of weights/variables created by the Template."""
return self.variables
@property
def trainable_weights(self):
"""List of trainable weights/variables created by the Template."""
return self.trainable_variables
@property
def non_trainable_weights(self):
"""List of non-trainable weights/variables created by the Template."""
return self.non_trainable_variables
@property
@deprecated("2017-02-21",
"The .var_scope property is deprecated. Please change your "
"code to use the .variable_scope property")
def var_scope(self):
"""Returns the variable scope object created by this Template."""
return self._variable_scope
class _EagerTemplateVariableStore:
"""Wrapper around EagerVariableStore to support nesting EagerTemplates."""
def __init__(self, variable_scope_name):
self._variable_scope_name = variable_scope_name
default = variable_scope._get_default_variable_store() # pylint: disable=protected-access
if default._store_eager_variables: # pylint: disable=protected-access
self._eager_variable_store = variable_scope.EagerVariableStore(default)
else:
# If no outer eager variable store has been made,
# the template needs to create one
self._eager_variable_store = variable_scope.EagerVariableStore()
self._used_once = False
def set_variable_scope_name(self, variable_scope_name):
self._variable_scope_name = variable_scope_name
@tf_contextlib.contextmanager
def as_default(self):
try:
if not self._used_once:
# If an outer eager VariableStore was explicitly created and set by
# the first time this template store was used (even if not at
# constructor time) then pick up the outer variable store.
default = variable_scope._get_default_variable_store() # pylint: disable=protected-access
if default._store_eager_variables: # pylint: disable=protected-access
self._eager_variable_store._store = default # pylint: disable=protected-access
self._used_once = True
with self._eager_variable_store.as_default(): # pylint: disable=protected-access
yield
finally:
# Each _EagerTemplateVariableStore object lives underneath a variable
# scope (see EagerTemplate.__call__). This variable scope's subscopes are
# closed when the EagerTemplate object returns from __call__. For
# top-level _EagerTemplateVariableStore objects, the variable store to
# which the variable scope is attached is different from the
# EagerVariableStore; as such it is necessary to close its subscopes
# here as well.
if self._variable_scope_name is None:
raise RuntimeError("A variable scope must be set before an "
"_EagerTemplateVariableStore object exits.")
variable_scope.get_variable_scope_store().close_variable_subscopes(
self._variable_scope_name)
def _variables_in_scope(self, variable_list):
if self._variable_scope_name is None:
raise RuntimeError(
"A variable scope must be set before variables can be accessed.")
return [
v for v in variable_list
if v.name.startswith(self._variable_scope_name + "/")
]
def variables(self):
return self._variables_in_scope(self._eager_variable_store.variables())
def trainable_variables(self):
return self._variables_in_scope(
self._eager_variable_store.trainable_variables())
def non_trainable_variables(self):
return self._variables_in_scope(
self._eager_variable_store.non_trainable_variables())
class EagerTemplate(Template):
"""Wrap a function to aid in variable sharing in Eager mode.
Templates are functions that create variables the first time they are called
and reuse them thereafter. See `make_template` for full documentation.
Note: By default, the full variable scope is captured at the time of first
call. If `create_scope_now` is passed as True to the constructor, the full
scope will be captured there, but no variables will be created until the first
call.
"""
def __init__(self,
name,
func,
create_scope_now=False,
custom_getter=None,
create_graph_function=False):
"""Creates a template for the given function.
Args:
name: A name for the scope created by this template. The name will be made
        unique by appending `_N` to it (see how
`tf.compat.v1.variable_scope` treats the `default_name` for details).
func: The function to apply each time.
create_scope_now: Whether to create the scope at Template construction
time, rather than first call. Defaults to false. Creating the scope at
construction time may be more convenient if the template is passed
through much lower level code, and you want to be sure of the scope name
without knowing exactly where it will be first called. If set to True,
the scope will be created in the constructor, and all subsequent times
in `__call__`, leading to a trailing numeral being added to the names of
all created Tensors. If set to False, the scope will be created at the
first call location.
custom_getter: optional custom getter to pass to `variable_scope()`
create_graph_function: When True, `func` will be executed as a graph
function. Enabling this flag allows the caller to reap the performance
benefits associated with executing graphs, at the cost of sacrificing
debuggability; however, not all Python functions can be compiled into
graph functions. See the documentation for `function.defun` for details.
Raises:
RuntimeError: if eager execution is not enabled.
"""
if not context.executing_eagerly():
raise RuntimeError(
"{} objects can only be used when eager execution is enabled, use "
"tf.Template for graph construction".format(type(self)))
super(EagerTemplate, self).__init__(name, func, create_scope_now, None,
custom_getter, create_graph_function)
if self._variable_scope is not None:
variable_scope_name = self._variable_scope.name
else:
# Defer setting the variable scope name until the variable scope
# is created in __call__.
variable_scope_name = None
self._template_store = _EagerTemplateVariableStore(variable_scope_name)
self._variable_scope_context_manager = None
def _call_func(self, args, kwargs):
try:
vars_at_start = self._template_store.variables()
trainable_at_start = self._template_store.trainable_variables()
if self._variables_created:
result = self._func(*args, **kwargs)
else:
# The first time we run, restore variables if necessary (via
# Trackable).
with trackable_util.capture_dependencies(template=self):
result = self._func(*args, **kwargs)
if self._variables_created:
# Variables were previously created, implying this is not the first
# time the template has been called. Check to make sure that no new
# trainable variables were created this time around.
trainable_variables = self._template_store.trainable_variables()
# If a variable that we intend to train is created as a side effect
# of creating a template, then that is almost certainly an error.
if len(trainable_at_start) != len(trainable_variables):
raise ValueError(
"Trainable variable created when calling a template "
"after the first time, perhaps you used tf.Variable "
"when you meant tf.get_variable: %s" % list(
object_identity.ObjectIdentitySet(trainable_variables) -
object_identity.ObjectIdentitySet(trainable_at_start)))
# Non-trainable tracking variables are a legitimate reason why a new
# variable would be created, but it is a relatively advanced use-case,
# so log it.
variables = self._template_store.variables()
if len(vars_at_start) != len(variables):
logging.info(
"New variables created when calling a template after "
"the first time, perhaps you used tf.Variable when you "
"meant tf.get_variable: %s",
list(
object_identity.ObjectIdentitySet(variables) -
object_identity.ObjectIdentitySet(vars_at_start)))
else:
self._variables_created = True
return result
except Exception as exc:
# Reraise the exception, but append the original definition to the
# trace.
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
trace = "".join(
_skip_common_stack_elements(self._stacktrace,
traceback.format_stack()))
arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace)
new_args = [arg0]
new_args.extend(args[1:])
exc.args = tuple(new_args)
raise
def __call__(self, *args, **kwargs):
# In both branches below, the template store is installed as default after
# the variable scope is opened in order to ensure that templates nested at
# the same level correctly uniquify lower variable scope names.
if self._variable_scope:
# Create a cache for the variable scope context manager the first time
# around so that we don't have to keep recreating it.
if not self._variable_scope_context_manager:
self._variable_scope_context_manager = variable_scope.variable_scope(
self._variable_scope, reuse=variable_scope.AUTO_REUSE)
with self._variable_scope_context_manager:
with self._template_store.as_default():
return self._call_func(args, kwargs)
else:
# The scope was not created at construction time, so create it here.
# Subsequent calls should reuse variables.
with variable_scope.variable_scope(
self._unique_name, self._name,
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
# Because the scope was not created at construction time, the template
# store's variable scope name is unset; set it here.
self._template_store.set_variable_scope_name(vs.name)
with self._template_store.as_default():
return self._call_func(args, kwargs)
@property
def variables(self):
"""Returns the list of variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self._template_store.variables()
@property
def trainable_variables(self):
"""Returns the list of trainable variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self._template_store.trainable_variables()
@property
def non_trainable_variables(self):
"""Returns the list of non-trainable variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self._template_store.non_trainable_variables()
@property
def global_variables(self):
"""Returns the list of global variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self.variables
@property
def local_variables(self):
"""Returns the list of global variables created by the Template."""
# Currently there is no local variable in Eager mode.
return []
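# A minimal eager-mode usage sketch (illustrative only; the constant inputs and
# the variable name "w" are hypothetical). In eager mode
# `tf.compat.v1.make_template` returns an `EagerTemplate`, whose created
# variables can be inspected through its `variables` and `trainable_variables`
# properties:
#
#   def scale(x):
#     w = tf.compat.v1.get_variable(
#         "w", shape=[], initializer=tf.compat.v1.constant_initializer(2.0))
#     return x * w
#
#   scale_t = tf.compat.v1.make_template("scale", scale)
#   y1 = scale_t(tf.constant(3.0))  # first call creates "scale/w"
#   y2 = scale_t(tf.constant(4.0))  # subsequent calls reuse the same "scale/w"
#   assert len(scale_t.trainable_variables) == 1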
|
bdba8f91d15c91dad082afd138d3c7e34b27ec00
|
88fe84c79e5740b4aaa068df6a70e35841a68d25
|
/studies/awkward_cupy_perfscript.py
|
3ac3682db063ee055b70e62d589714a3c2298108
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-hep/awkward
|
176f56182a936270e163eab92ea18368c2bdc1be
|
519bba6ed2eec4e227994d2fd1a62b2a51f15e20
|
refs/heads/main
| 2023-09-02T20:19:10.175088
| 2023-09-01T20:13:25
| 2023-09-01T20:13:25
| 202,413,762
| 208
| 22
|
BSD-3-Clause
| 2023-09-14T17:19:29
| 2019-08-14T19:32:12
|
Python
|
UTF-8
|
Python
| false
| false
| 912
|
py
|
awkward_cupy_perfscript.py
|
import awkward as ak
array = ak.from_parquet("/home/swish/Downloads/zlib9-jagged3.parquet", row_groups = range(25))
print(array)
import cupy
cuda_array = ak.to_backend(array, "cuda")
print(f"Memory used GPU: {cupy.get_default_memory_pool().used_bytes()}")
cuda_stream_1 = cupy.cuda.Stream(non_blocking=True)
cuda_stream_2 = cupy.cuda.Stream(non_blocking=True)
cuda_stream_3 = cupy.cuda.Stream(non_blocking=True)
with cuda_stream_1:
for i in range(10):
a = ak.num(cuda_array, 2)
a = ak.num(cuda_array, 1)
with cuda_stream_2:
for i in range(10):
b = ak.num(cuda_array, 3)
with cuda_stream_3:
for i in range(10):
c = ak.num(cuda_array, 1)
import awkward._connect.cuda
awkward._connect.cuda.synchronize_cuda(cuda_stream_1)
print(a)
awkward._connect.cuda.synchronize_cuda(cuda_stream_2)
print(b)
awkward._connect.cuda.synchronize_cuda(cuda_stream_3)
print(c)
|
bc570636f293e57b581c35cfe4f3aa6a59c555c0
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/Deprecated/deprecated/__init__.pyi
|
c7b200a4e0904f2b8ca08f910ae9f48a4bf7a0a2
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 78
|
pyi
|
__init__.pyi
|
from .classic import deprecated as deprecated
__credits__: str
__date__: str
|
0f1862ce8d64f9c30c360436ff6a47fd594a374a
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/tools/c7n_mailer/c7n_mailer/datadog_delivery.py
|
8ca0896b4b461a06dcdf956a4944e4495d323078
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,326
|
py
|
datadog_delivery.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import time
from datadog import api
from datadog import initialize
from urllib.parse import urlparse, parse_qsl
class DataDogDelivery:
DATADOG_API_KEY = "datadog_api_key"
DATADOG_APPLICATION_KEY = "datadog_application_key"
def __init__(self, config, session, logger):
self.config = config
self.logger = logger
self.session = session
self.datadog_api_key = self.config.get(self.DATADOG_API_KEY, None)
self.datadog_application_key = self.config.get(self.DATADOG_APPLICATION_KEY, None)
# Initialize datadog
if self.datadog_api_key and self.datadog_application_key:
options = {
"api_key": self.datadog_api_key,
"app_key": self.datadog_application_key,
}
initialize(**options)
def get_datadog_message_packages(self, sqs_message):
date_time = time.time()
datadog_rendered_messages = []
metric_config_map = self._get_metrics_config_to_resources_map(sqs_message)
if not metric_config_map:
return datadog_rendered_messages
if sqs_message and sqs_message.get("resources", False):
for resource in sqs_message["resources"]:
tags = [
"event:{}".format(sqs_message["event"]),
"account_id:{}".format(sqs_message["account_id"]),
"account:{}".format(sqs_message["account"]),
"region:{}".format(sqs_message["region"]),
]
tags.extend(
[
"{key}:{value}".format(key=key, value=resource[key])
for key in resource.keys()
if key != "Tags"
]
)
if resource.get("Tags", False):
tags.extend(
[
"{key}:{value}".format(key=tag["Key"], value=tag["Value"])
for tag in resource["Tags"]
]
)
for metric_config in metric_config_map:
datadog_rendered_messages.append(
{
"metric": metric_config["metric_name"],
"points": (
date_time,
self._get_metric_value(metric_config=metric_config, tags=tags),
),
"tags": tags,
}
)
# eg: [{'metric': 'metric_name', 'points': (date_time, value),
# 'tags': ['tag1':'value', 'tag2':'value']}, ...]
return datadog_rendered_messages
def deliver_datadog_messages(self, datadog_message_packages, sqs_message):
if len(datadog_message_packages) > 0:
self.logger.info(
"Sending account:{account} policy:{policy} {resource}:{quantity} to DataDog".format(
account=sqs_message.get("account", ""),
policy=sqs_message["policy"]["name"],
resource=sqs_message["policy"]["resource"],
quantity=len(sqs_message["resources"]),
)
)
api.Metric.send(datadog_message_packages)
@staticmethod
def _get_metric_value(metric_config, tags):
metric_value = 1
metric_value_tag = metric_config.get("metric_value_tag", "default")
if metric_value_tag != "default":
for tag in tags:
if metric_value_tag in tag:
metric_value = float(tag[tag.find(":") + 1 :])
return metric_value
@staticmethod
def _get_metrics_config_to_resources_map(sqs_message):
metric_config_map = []
if (
sqs_message
and sqs_message.get("action", False)
and sqs_message["action"].get("to", False)
):
for to in sqs_message["action"].get("to", []):
if to.startswith("datadog://"):
parsed = urlparse(to)
metric_config_map.append(dict(parse_qsl(parsed.query)))
return metric_config_map
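# A hypothetical illustration of the mapping performed above: a mailer action such as
#
#   to: ["datadog://?metric_name=c7n.my_metric&metric_value_tag=default"]
#
# is parsed by urlparse/parse_qsl into
#   metric_config_map = [{"metric_name": "c7n.my_metric", "metric_value_tag": "default"}]
# and get_datadog_message_packages() then emits one point of "c7n.my_metric" per
# resource, tagged with the resource's attributes.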
|
bd82ef2608f50e42dacbdbac598b537acb8ab233
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CondTools/SiStrip/test/SiStripApvSimulationParametersBuilder_cfg.py
|
1e925fd4bdfc157c1079c4464a93efd439396a21
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
SiStripApvSimulationParametersBuilder_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("ICALIB")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string('sqlite_file:SiStripConditionsDBFile.db'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('SiStripApvSimulationParametersRcd'),
tag = cms.string('SiStripApvSimulationParameters_2016preVFP_v1')
))
)
process.apvSimParam = cms.ESSource("SiStripApvSimulationParametersESSource",
apvBaselines_nBinsPerBaseline=cms.untracked.uint32(82),
apvBaselines_minBaseline=cms.untracked.double(0.),
apvBaselines_maxBaseline=cms.untracked.double(738.),
apvBaselines_puBinEdges=cms.untracked.vdouble(0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20., 22., 24., 26., 28., 30., 32., 34., 36., 38., 40., 42., 44., 46., 48., 50.),
apvBaselines_zBinEdges=cms.untracked.vdouble(0., 10., 20., 30., 40., 50., 60., 70., 90.),
apvBaselines_rBinEdges_TID=cms.untracked.vdouble(0., 10., 20., 30., 40., 50., 60., 70., 90.),
apvBaselines_rBinEdges_TEC=cms.untracked.vdouble(0., 10., 20., 30., 40., 50., 60., 70., 90.),
apvBaselinesFile_tib1=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TIB1_11us.txt"),
apvBaselinesFile_tib2=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TIB2_14us.txt"),
apvBaselinesFile_tib3=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TIB3_15us.txt"),
apvBaselinesFile_tib4=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TIB4_18us.txt"),
apvBaselinesFile_tob1=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TOB1_10us.txt"),
apvBaselinesFile_tob2=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TOB2_12us.txt"),
apvBaselinesFile_tob3=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TOB3_15us.txt"),
apvBaselinesFile_tob4=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TOB4_19us.txt"),
apvBaselinesFile_tob5=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TOB5_24us.txt"),
apvBaselinesFile_tob6=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TOB6_25us.txt"),
apvBaselinesFile_tid1=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TID1_9us.txt"),
apvBaselinesFile_tid2=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TID2_9us.txt"),
apvBaselinesFile_tid3=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TID3_9us.txt"),
apvBaselinesFile_tec1=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC1_10us.txt"),
apvBaselinesFile_tec2=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC2_11us.txt"),
apvBaselinesFile_tec3=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC3_11us.txt"),
apvBaselinesFile_tec4=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC4_13us.txt"),
apvBaselinesFile_tec5=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC5_14us.txt"),
apvBaselinesFile_tec6=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC6_15us.txt"),
apvBaselinesFile_tec7=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC7_16us.txt"),
apvBaselinesFile_tec8=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC8_16us.txt"),
apvBaselinesFile_tec9=cms.untracked.FileInPath("SimTracker/SiStripDigitizer/data/APVBaselines_TEC9_16us.txt")
)
process.prod = cms.EDAnalyzer("SiStripApvSimulationParametersBuilder")
process.p = cms.Path(process.prod)
|
4ba7723b10221460369913e0a27d7490ed8e6313
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/nxos/tests/RunBashTop/cli/equal/golden_output2_expected.py
|
5864069d4c3e48309c6ef9dadf9bd6ecb46612c0
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
golden_output2_expected.py
|
expected_output = {}
# TODO: need to update unittests.py to use output instead of mocked output
# {
# "time": "07:46:26",
# "up": "1:55",
# "user": 2,
# "load_average": {"one_min": 1.89, "five_mins": 1.11, "fifteen_mins": 0.63},
# "tasks": {"total": 299, "running": 1, "sleeping": 298, "stopped": 0, "zombie": 0},
# "cpus": {
# "us": 2.8,
# "sy": 3.5,
# "ni": 0.1,
# "id": 92.6,
# "wa": 0.3,
# "hi": 0.6,
# "si": 0.2,
# "st": 0.0,
# },
# "mib_mem": {
# "total": 7757.51,
# "free": 1652.19,
# "used": 3648.47,
# "buff_cache": 82.78,
# },
# "mib_swap": {"total": 0.0, "used": 0.0, "free": 0.0, "avail_mem": 2374.07},
# "pids": {
# 28216: {
# "user": "root",
# "pr": 20,
# "ni": 0,
# "virt": 377.66,
# "res": 49.59,
# "shr": 31.47,
# "s": "S",
# "cpu_percent": 14.0,
# "mem_percent": 0.7,
# "time": "7:13.20",
# "command": "l2fwder",
# },
# 52: {
# "user": "root",
# "pr": 25,
# "ni": 5,
# "virt": 0.0,
# "res": 0.0,
# "shr": 0.0,
# "s": "S",
# "cpu_percent": 10.0,
# "mem_percent": 0.0,
# "time": "6:47.36",
# "command": "ksmd",
# },
# },
# }
|
5b68267874c6a531012310fe7b351d9bca61cc34
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQM/L1TMonitor/python/L1TdeStage2BMTF_cfi.py
|
195ba1806a810a9a058f3d516eaed64c17815264
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
L1TdeStage2BMTF_cfi.py
|
import FWCore.ParameterSet.Config as cms
# List of bins to ignore
ignoreBinsDeStage2Bmtf = [1]
# compares the unpacked BMTF regional muon collection to the emulated BMTF regional muon collection
# only muons that do not match are filled in the histograms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tdeStage2Bmtf = DQMEDAnalyzer(
"L1TStage2RegionalMuonCandComp",
regionalMuonCollection1 = cms.InputTag("bmtfDigis", "BMTF"),
# regionalMuonCollection2 = cms.InputTag("valBmtfDigis", "BMTF"), # didn't remove the default config
regionalMuonCollection2 = cms.InputTag("valBmtfAlgoSel", "BMTF"),
monitorDir = cms.untracked.string("L1TEMU/L1TdeStage2BMTF"),
regionalMuonCollection1Title = cms.untracked.string("BMTF data"),
regionalMuonCollection2Title = cms.untracked.string("BMTF emulator"),
summaryTitle = cms.untracked.string("Summary of comparison between BMTF muons and BMTF emulator muons"),
ignoreBin = cms.untracked.vint32(ignoreBinsDeStage2Bmtf),
verbose = cms.untracked.bool(False),
hasDisplacementInfo = cms.untracked.bool(True)
)
|
cd59ed1a9270e86902abe844b389ea5156235daf
|
957ed55f3bcaa8c4e54cd5a31998b7bdf4776af4
|
/Find Product.py
|
47fa7c6c89f707766c967e1b3d5c15ee967bce65
|
[] |
no_license
|
parasjain-12/HackerEarth-Solution
|
242117d1c2603801a417ac718d29e8e952fc199b
|
e2156145ab71aecfc09c88186e2baf82a02d5ef1
|
refs/heads/master
| 2022-05-06T16:02:46.207898
| 2022-04-18T08:26:03
| 2022-04-18T08:26:03
| 147,183,587
| 181
| 99
| null | 2021-07-21T13:25:52
| 2018-09-03T09:38:48
|
Python
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
Find Product.py
|
x = pow(10,9)+7
n = int(input())
l = list(map(int,input().split()))
ans =1
for i in range(n):
ans = (ans*l[i])%x
print(ans)
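# Worked example (hypothetical input): for n = 3 and l = [2, 3, 4],
# ans = (((1*2) % x) * 3 % x) * 4 % x = 24, i.e. the product of l modulo 10**9 + 7.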
|
1f4c5fc85235caabd1c45043f9a09e85c67c38ec
|
78f5c927d37b04e2383aef5302255af6a7cbb81e
|
/controller_manager_tests/test/controller_manager_interface_test.py
|
50f11107749617bdbb6d599fe12c73ecf8c5cf9a
|
[
"BSD-3-Clause"
] |
permissive
|
ros-controls/ros_control
|
239ecaa8e72a3b6289f39ede4adddd831f457a21
|
12edce55d89c1ddf68ad903407af7809e712b857
|
refs/heads/noetic-devel
| 2023-08-12T22:35:03.484122
| 2023-02-08T18:24:26
| 2023-02-08T18:24:26
| 6,607,925
| 460
| 293
|
BSD-3-Clause
| 2023-02-17T09:13:17
| 2012-11-09T03:09:38
|
C++
|
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
controller_manager_interface_test.py
|
#!/usr/bin/env python
import unittest
from controller_manager import controller_manager_interface
class TestUtils(unittest.TestCase):
def test_scripts(self):
# load my_controller1.
self.assertTrue(controller_manager_interface.load_controller('my_controller1'))
# load a non-existent controller.
self.assertFalse(controller_manager_interface.load_controller('non_existent_controller'))
# start my_controller1.
self.assertTrue(controller_manager_interface.start_controller('my_controller1'))
# start my_controller3 which hasn't been loaded.
self.assertFalse(controller_manager_interface.start_controller('my_controller3'))
# stop my_controller1
self.assertTrue(controller_manager_interface.stop_controller('my_controller1'))
# load my_controller3.
self.assertTrue(controller_manager_interface.load_controller('my_controller3'))
# start my_controller1 and my_controller3
self.assertTrue(controller_manager_interface.start_controllers(('my_controller1', 'my_controller3')))
# stop my_controller1 and my_controller3
self.assertTrue(controller_manager_interface.stop_controllers(('my_controller1', 'my_controller3')))
# reload libraries and restore controllers
self.assertTrue(controller_manager_interface.reload_libraries(force_kill=True, restore=True))
# unload my_controller1.
self.assertTrue(controller_manager_interface.unload_controller('my_controller1'))
        # unload my_controller3.
self.assertTrue(controller_manager_interface.unload_controller('my_controller3'))
# load my_controller1.
self.assertTrue(controller_manager_interface.load_controller('my_controller1'))
        # reload libraries when some controllers are loaded.
self.assertFalse(controller_manager_interface.reload_libraries(force_kill=False, restore=True))
        # reload libraries when controllers are loaded with force_kill=True.
self.assertTrue(controller_manager_interface.reload_libraries(force_kill=True, restore=False))
        # reload libraries when no controllers are loaded.
self.assertTrue(controller_manager_interface.reload_libraries(force_kill=False, restore=False))
if __name__ == '__main__':
import rostest
rostest.rosrun('controller_manager_msgs',
'controller_manager_scripts_rostest',
TestUtils)
|
c180263ab9a9eab8891bb5a3785405df88723732
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/examples/sql_taxi.py
|
e8a29806d72a6dd382e3ef5d77d82b8e6f6316a4
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 3,930
|
py
|
sql_taxi.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example that processes streaming NYC Taxi data with SqlTransform.
This example reads from the PubSub NYC Taxi stream described in
https://github.com/googlecodelabs/cloud-dataflow-nyc-taxi-tycoon, aggregates
the data in 15s windows using SqlTransform, and writes the output to
a user-defined PubSub topic.
A Java version supported by Beam must be installed locally to run this pipeline.
Additionally, Docker must also be available to run this pipeline locally.
"""
# pytype: skip-file
import json
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.sql import SqlTransform
def run(output_topic, pipeline_args):
pipeline_options = PipelineOptions(
pipeline_args, save_main_session=True, streaming=True)
with beam.Pipeline(options=pipeline_options) as pipeline:
_ = (
pipeline
| beam.io.ReadFromPubSub(
topic='projects/pubsub-public-data/topics/taxirides-realtime',
timestamp_attribute="ts").with_output_types(bytes)
| "Parse JSON payload" >> beam.Map(json.loads)
# Use beam.Row to create a schema-aware PCollection
| "Create beam Row" >> beam.Map(
lambda x: beam.Row(
ride_status=str(x['ride_status']),
passenger_count=int(x['passenger_count'])))
        # SqlTransform computes its result within the existing windows
| "15s fixed windows" >> beam.WindowInto(beam.window.FixedWindows(15))
# Aggregate drop offs and pick ups that occur within each 15s window
| SqlTransform(
"""
SELECT
ride_status,
COUNT(*) AS num_rides,
SUM(passenger_count) AS total_passengers
FROM PCOLLECTION
WHERE NOT ride_status = 'enroute'
GROUP BY ride_status""")
# SqlTransform yields python objects with attributes corresponding to
# the outputs of the query.
# Collect those attributes, as well as window information, into a dict
| "Assemble Dictionary" >> beam.Map(
lambda row,
window=beam.DoFn.WindowParam: {
"ride_status": row.ride_status,
"num_rides": row.num_rides,
"total_passengers": row.total_passengers,
"window_start": window.start.to_rfc3339(),
"window_end": window.end.to_rfc3339()
})
| "Convert to JSON" >> beam.Map(json.dumps)
| "UTF-8 encode" >> beam.Map(lambda s: s.encode("utf-8"))
| beam.io.WriteToPubSub(topic=output_topic))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--output_topic',
dest='output_topic',
required=True,
help=(
'Cloud PubSub topic to write to (e.g. '
'projects/my-project/topics/my-topic), must be created prior to '
'running the pipeline.'))
known_args, pipeline_args = parser.parse_known_args()
run(known_args.output_topic, pipeline_args)
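# A hypothetical invocation (topic, project, and bucket names are placeholders;
# flags other than --output_topic are forwarded to PipelineOptions):
#
#   python sql_taxi.py \
#     --output_topic=projects/my-project/topics/taxi-aggregates \
#     --runner=DataflowRunner --project=my-project --region=us-central1 \
#     --temp_location=gs://my-bucket/tmp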
|
a61d138394e46386b9add0643ecf17e7937eca3c
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/Step_Switch_Pico_Demo/code.py
|
d9971a16cb42bca83b5c7051bb07c9d59eb84ef0
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 4,059
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2022 john park for Adafruit Industries
# SPDX-License-Identifier: MIT
# Pico Four Step Switch Keypad Demo
import time
import board
import keypad
from digitalio import Direction, DigitalInOut
board_led = DigitalInOut(board.LED)
board_led.direction = Direction.OUTPUT
board_led.value = True
switch_pins = (board.GP6, board.GP7, board.GP8, board.GP9)
keys = keypad.Keys(switch_pins, value_when_pressed=False, pull=True)
led_pins = (board.GP2, board.GP3, board.GP4, board.GP5)
leds = []
for led_pin in led_pins:
tmp_led_pin = DigitalInOut(led_pin)
tmp_led_pin.direction = Direction.OUTPUT
tmp_led_pin.value = False
leds.append(tmp_led_pin)
def blink_led(led_num, pause, repeat):
for __ in range(repeat * 2):
leds[led_num].value = not leds[led_num].value
time.sleep(pause)
def blink_all_leds(pause, repeat):
for __ in range(repeat * 2):
for led in leds:
led.value = not led.value
time.sleep(pause)
blink_all_leds(0.1, 4)
mode_picked = False # state of mode selection
mode_choice = 0 # MIDI mode, desk switcher mode, etc.
modes = (0, 1, 2, 3)
mode_names = ("MIDI", "DESK", "SELECTOR", "COPY-PASTE")
print("Select the mode by pressing a button: MIDI, DESK, SELECTOR, or COPY-PASTE")
while not mode_picked: # program waits for a mode to be picked
key = keys.events.get()
if key:
if key.pressed:
mode_choice = key.key_number
print(mode_names[mode_choice], "mode")
mode_picked = True
if mode_choice == 0: # MIDI mode
import usb_midi
import adafruit_midi
from adafruit_midi.control_change import ControlChange
midi = adafruit_midi.MIDI(
midi_in=usb_midi.ports[0],
in_channel=0,
midi_out=usb_midi.ports[1],
out_channel=0
)
cc_num = [16, 17, 18, 19]
cc_state = [False, False, False, False]
else: # HID modes
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
if mode_choice == 1: # Mac Desktop switcher mode
kpd = Keyboard(usb_hid.devices)
MODIFIER = Keycode.CONTROL
KEYMAP = (
("Desktop 1", [MODIFIER, Keycode.ONE]),
("Desktop 2", [MODIFIER, Keycode.TWO]),
("Desktop 3", [MODIFIER, Keycode.THREE]),
("Desktop 4", [MODIFIER, Keycode.FOUR]),
)
if mode_choice == 2: # SELECTOR mode for game weapon slot, Wirecast, etc.
kpd = Keyboard(usb_hid.devices)
MODIFIER = Keycode.SHIFT
KEYMAP = (
("Selector 1", [MODIFIER, Keycode.ONE]),
("Selector 2", [MODIFIER, Keycode.TWO]),
("Selector 3", [MODIFIER, Keycode.THREE]),
("Selector 4", [MODIFIER, Keycode.FOUR]),
)
if mode_choice == 3: # Copy/Paste mode
kpd = Keyboard(usb_hid.devices)
# Choose the correct modifier key for Windows or Mac.
# MODIFIER = Keycode.CONTROL # For Windows
MODIFIER = Keycode.COMMAND
KEYMAP = (
("Copy/Paste 1", [MODIFIER, Keycode.A]), # select all
("Copy/Paste 2", [MODIFIER, Keycode.X]), # cut
("Copy/Paste 3", [MODIFIER, Keycode.C]), # copy
("Copy/Paste 4", [MODIFIER, Keycode.V]), # paste
)
blink_led(mode_choice, 0.1, 3)
while True:
key = keys.events.get()
if key:
if key.pressed:
i = key.key_number
print(i, "pressed")
if mode_choice == 0:
leds[i].value = not leds[i].value
if cc_state[i] is False:
midi.send(ControlChange(cc_num[i], 127))
cc_state[i] = True
else:
midi.send(ControlChange(cc_num[i], 0))
cc_state[i] = False
else:
print(KEYMAP[i][0])
kpd.send(*KEYMAP[i][1])
for switch_led in leds: # blank the LEDs first
switch_led.value = False
leds[i].value = True # light selected switch LED
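# Deployment note (standard CircuitPython convention rather than anything stated
# above): copy this file to the board's CIRCUITPY drive as code.py, together with
# the adafruit_midi and adafruit_hid libraries it imports; it runs on reset.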
|
fa8d08fef04fbf755decdef663efb6f1a2380eab
|
efcd21234f3291e8fc561f49a7c88fc57a63e952
|
/tests/unit/language/ast/test_inline_fragment.py
|
62879c91d0f864dc16c6c092e14ab623fc94ca67
|
[
"MIT"
] |
permissive
|
tartiflette/tartiflette
|
146214a43847d2f423bf74594643c1fdefc746f1
|
421c1e937f553d6a5bf2f30154022c0d77053cfb
|
refs/heads/master
| 2023-09-01T02:40:05.974025
| 2022-01-20T14:55:31
| 2022-01-20T14:55:31
| 119,035,565
| 586
| 39
|
MIT
| 2023-09-11T07:49:27
| 2018-01-26T09:56:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,931
|
py
|
test_inline_fragment.py
|
import pytest
from tartiflette.language.ast import InlineFragmentNode
def test_inlinefragmentnode__init__():
inline_fragment_node = InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
)
assert inline_fragment_node.selection_set == "inlineFragmentSelectionSet"
assert inline_fragment_node.type_condition == "inlineFragmentTypeCondition"
assert inline_fragment_node.directives == "inlineFragmentDirectives"
assert inline_fragment_node.location == "inlineFragmentLocation"
@pytest.mark.parametrize(
"inline_fragment_node,other,expected",
[
(
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
Ellipsis,
False,
),
(
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
InlineFragmentNode(
selection_set="inlineFragmentSelectionSetBis",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
False,
),
(
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeConditionBis",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
False,
),
(
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectivesBis",
location="inlineFragmentLocation",
),
False,
),
(
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocationBis",
),
False,
),
(
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
True,
),
],
)
def test_inlinefragmentnode__eq__(inline_fragment_node, other, expected):
assert (inline_fragment_node == other) is expected
@pytest.mark.parametrize(
"inline_fragment_node,expected",
[
(
InlineFragmentNode(
selection_set="inlineFragmentSelectionSet",
type_condition="inlineFragmentTypeCondition",
directives="inlineFragmentDirectives",
location="inlineFragmentLocation",
),
"InlineFragmentNode("
"type_condition='inlineFragmentTypeCondition', "
"directives='inlineFragmentDirectives', "
"selection_set='inlineFragmentSelectionSet', "
"location='inlineFragmentLocation')",
)
],
)
def test_inlinefragmentnode__repr__(inline_fragment_node, expected):
assert inline_fragment_node.__repr__() == expected
|
f640d7339dbbefb2f035abdf7da82417b60131ba
|
483424524c70852cc043e0d77bf1b757a61d797a
|
/tests/unit/model_parallelism/test_configurable_parallel_mp.py
|
824ecea5f1442610cab2cac45e24670db02ca33a
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/DeepSpeed
|
810f1af320020718d0794f5a97cde6f1d17af122
|
55d9964c59c0c6e23158b5789a5c36c28939a7b0
|
refs/heads/master
| 2023-09-06T07:40:52.145692
| 2023-09-05T23:51:23
| 2023-09-05T23:51:23
| 235,860,204
| 27,557
| 3,347
|
Apache-2.0
| 2023-09-14T21:38:46
| 2020-01-23T18:35:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,611
|
py
|
test_configurable_parallel_mp.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import deepspeed
import pytest
import random
import numpy as np
import deepspeed.comm as dist
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest, DistributedFixture
from unit.megatron_model import get_gpt2_model, get_megatron_version
from deepspeed.runtime.utils import required_torch_version
pytestmark = pytest.mark.skipif(not required_torch_version(min_version=1.5, max_version=1.13),
reason='Megatron-LM package requires Pytorch version >=1.5 and <=1.13')
# TODO: integrated testing of TP and ZeRO 1/2/3
def get_deepspeed_model(model):
ds_config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
}
from megatron import mpu
model, _, _, _ = deepspeed.initialize(model=model,
mpu=mpu,
model_parameters=model.parameters(),
config=ds_config_dict)
return model
class ConfigurableMP(DistributedTest):
@pytest.fixture(autouse=True)
def reset_random(self, seed=1234):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
get_accelerator().manual_seed_all(seed)
@pytest.fixture
def inputs(self, bs=1, seq_len=20):
input_ids = torch.randint(low=0, high=1000, size=(bs, seq_len))
position_ids = torch.randint(low=0, high=2, size=(bs, seq_len))
attention_mask = torch.randint(low=0, high=2, size=(bs, seq_len), dtype=torch.bool)
return [input_ids, position_ids, attention_mask]
class TestConfigurableMP(ConfigurableMP):
@pytest.mark.world_size(1)
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_gpt2_basic(self, tmpdir, inputs):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
model = get_gpt2_model(args_defaults)
model = get_deepspeed_model(model)
model.eval()
device_name = get_accelerator().device_name()
baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
tag = 'mp_1'
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)
dist.barrier()
model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)
test = model(inputs[0], inputs[1], inputs[2])
assert torch.allclose(baseline, test,
atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"
@pytest.mark.world_size(2)
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_gpt2_mp2_no_resize(self, tmpdir, inputs):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
model = get_gpt2_model(args_defaults, mp_size=2)
model = get_deepspeed_model(model)
model.eval()
device_name = get_accelerator().device_name()
baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
tag = 'mp_2'
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)
dist.barrier()
model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)
device_name = get_accelerator().device_name()
test = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
assert torch.allclose(baseline, test, rtol=1.0,
atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"
# This fixture provides the baseline model with mp=2 to TestConfigurableResizeMP
class baseline_mp2(DistributedFixture):
world_size = 2
def run(self, inputs, class_tmpdir):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
model = get_gpt2_model(args_defaults, mp_size=self.world_size)
model = get_deepspeed_model(model)
model.eval()
with torch.no_grad():
device_name = get_accelerator().device_name()
baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
if dist.get_rank() == 0:
save_path = os.path.join(class_tmpdir, "output.pt")
torch.save(baseline.cpu(), save_path)
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(class_tmpdir, client_state=state_dict)
class TestConfigurableResizeMP(ConfigurableMP):
world_size = [1, 4]
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test(self, baseline_mp2, inputs, class_tmpdir):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
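# Rebuild the model with the (possibly resized) model-parallel degree taken from
# the launcher environment, then load the mp=2 baseline checkpoint into it below.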
world_size = os.environ["WORLD_SIZE"]
model = get_gpt2_model(args_defaults, mp_size=world_size)
model = get_deepspeed_model(model)
model.eval()
with torch.no_grad():
model.load_checkpoint(class_tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False)
device_name = get_accelerator().device_name()
test = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
if dist.get_rank() == 0:
load_path = os.path.join(class_tmpdir, "output.pt")
baseline = torch.load(load_path)
test = test.cpu()
assert torch.allclose(
baseline, test,
atol=1e-03), f"Baseline output {baseline} is not equal to save-then-load output {test}"
|
32b318004b31bef2300288f192f49ec5dc85684f
|
0529610da235bac1490c4f0b28a58258f05f116c
|
/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/package/scripts/tez_client.py
|
8c20a7302e88bce2af697b29e99460761275a4fb
|
[
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
apache/ambari
|
be1616f332615ef55fc1a47e84353f3e5a45732d
|
23881f23577a65de396238998e8672d6c4c5a250
|
refs/heads/trunk
| 2023-08-31T23:01:31.954106
| 2023-08-28T06:29:01
| 2023-08-28T06:29:01
| 2,442,457
| 2,078
| 1,957
|
Apache-2.0
| 2023-09-14T16:56:03
| 2011-09-23T07:00:08
|
Java
|
UTF-8
|
Python
| false
| false
| 5,517
|
py
|
tez_client.py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
import urlparse
from ambari_commons import OSConst
from ambari_commons.inet_utils import download_file
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons.os_utils import copy_file, extract_path_component
from resource_management.core.exceptions import ClientComponentHasNoStatus
from resource_management.core.source import InlineTemplate
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.default import default
from resource_management.core.logger import Logger
from tez import tez
class TezClient(Script):
def configure(self, env, config_dir=None, upgrade_type=None):
"""
Write tez-site.xml and tez-env.sh to the config directory
:param env: Python Environment
:param config_dir: During rolling upgrade, which config directory to save configs to.
E.g., /usr/$STACK/current/tez-client/conf
"""
import params
env.set_params(params)
tez(config_dir)
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class TezClientLinux(TezClient):
def stack_upgrade_save_new_config(self, env):
"""
Because this gets called during a Rolling Upgrade, the new tez configs have already been saved, so we must be
careful to only call configure() on the directory of the new version.
:param env:
"""
import params
env.set_params(params)
conf_select_name = "tez"
base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
if config_dir:
Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
# Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
# must change it now so this function can find the Jinja Templates for the service.
env.config.basedir = base_dir
self.configure(env, config_dir=config_dir)
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select_packages(params.version)
def install(self, env):
import params
self.install_packages(env)
self.configure(env, config_dir=params.tez_conf_dir)
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class TezClientWindows(TezClient):
def install(self, env):
import params
if params.tez_home_dir is None:
self.install_packages(env)
params.refresh_tez_state_dependent_params()
env.set_params(params)
self._install_lzo_support_if_needed(params)
self.configure(env, config_dir=params.tez_conf_dir)
def _install_lzo_support_if_needed(self, params):
hadoop_classpath_prefix = self._expand_hadoop_classpath_prefix(params.hadoop_classpath_prefix_template, params.tez_site_config)
hadoop_lzo_dest_path = extract_path_component(hadoop_classpath_prefix, "hadoop-lzo-")
if hadoop_lzo_dest_path:
hadoop_lzo_file = os.path.split(hadoop_lzo_dest_path)[1]
config = Script.get_config()
file_url = urlparse.urljoin(config['ambariLevelParams']['jdk_location'], hadoop_lzo_file)
hadoop_lzo_dl_path = os.path.join(config["agentLevelParams"]["agentCacheDir"], hadoop_lzo_file)
download_file(file_url, hadoop_lzo_dl_path)
#This is for protection against configuration changes. It will infect every new destination with the lzo jar,
# but since the classpath points to the jar directly we're getting away with it.
if not os.path.exists(hadoop_lzo_dest_path):
copy_file(hadoop_lzo_dl_path, hadoop_lzo_dest_path)
def _expand_hadoop_classpath_prefix(self, hadoop_classpath_prefix_template, configurations):
import resource_management
hadoop_classpath_prefix_obj = InlineTemplate(hadoop_classpath_prefix_template, configurations_dict=configurations,
extra_imports=[resource_management, resource_management.core,
resource_management.core.source])
hadoop_classpath_prefix = hadoop_classpath_prefix_obj.get_content()
return hadoop_classpath_prefix
if __name__ == "__main__":
TezClient().execute()
|
2e5d81e521a2f1829a49da6e22ba3bd05e77484c
|
fe255e9aee1d0c0f565054f2346221a836f869ab
|
/bot/exts/filtering/_settings_types/__init__.py
|
61b5737d4176214f818f830771d3528a83e0feb2
|
[
"MIT",
"BSD-3-Clause",
"Python-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
python-discord/bot
|
fe04810807f3a712911e76a4d4f0ef03a7eb5c86
|
f2048684291cc6358565e96ef3562512fbeb2505
|
refs/heads/main
| 2023-09-03T21:53:23.100718
| 2023-09-02T11:07:30
| 2023-09-02T11:07:30
| 120,088,519
| 1,479
| 1,091
|
MIT
| 2023-09-14T17:07:00
| 2018-02-03T12:20:25
|
Python
|
UTF-8
|
Python
| false
| false
| 381
|
py
|
__init__.py
|
from bot.exts.filtering._settings_types.actions import action_types
from bot.exts.filtering._settings_types.validations import validation_types
settings_types = {
"ActionEntry": {settings_type.name: settings_type for settings_type in action_types},
"ValidationEntry": {settings_type.name: settings_type for settings_type in validation_types}
}
__all__ = ["settings_types"]
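# Illustrative lookup only ("ping" is a hypothetical entry name, not necessarily
# registered among the action types):
#
#   action_entry_cls = settings_types["ActionEntry"].get("ping")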
|
84b72db9e4c11784e8776a101ee0a95fc1a0aef0
|
09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce
|
/tests/fourier/test_visualize.py
|
4c58493924e60a0f508a74fe9a21ac0003c718c1
|
[
"Apache-2.0"
] |
permissive
|
PennyLaneAI/pennylane
|
458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7
|
0843183ff15a013c2622af5e61fea431d18076d3
|
refs/heads/master
| 2023-09-03T17:00:43.105784
| 2023-09-01T16:15:07
| 2023-09-01T16:15:07
| 129,936,360
| 1,431
| 410
|
Apache-2.0
| 2023-09-14T21:30:56
| 2018-04-17T16:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 11,870
|
py
|
test_visualize.py
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for :mod:`fourier` visualization functions.
"""
# pylint: disable=too-few-public-methods,too-many-arguments
import pytest
from pennylane import numpy as np
from pennylane.fourier.visualize import _validate_coefficients
from pennylane.fourier.visualize import (
violin,
bar,
box,
panel,
radial_box,
)
matplotlib = pytest.importorskip("matplotlib")
plt = matplotlib.pyplot
coeffs_1D_valid_1 = np.array([0.5, 0, 0.25j, 0.25j, 0])
coeffs_1D_valid_2 = [0.5, 0.1j, -0.25j, 0.25j, -0.1j]
coeffs_1D_invalid = np.array([0.5, 0, 0.25j, 0.25j])
coeffs_1D_valid_list = [coeffs_1D_valid_1, coeffs_1D_valid_2]
coeffs_2D_valid_1 = np.array(
[
[
0.07469786 + 0.0000e00j,
0.0 + 4.3984e-04j,
0.00101184 - 0.0000e00j,
0.00101184 + 0.0000e00j,
0.0 - 4.3984e-04j,
],
[
-0.03973803 - 1.9390e-03j,
0.0 + 0.0000e00j,
0.01986902 + 9.6950e-04j,
0.01986902 + 9.6950e-04j,
-0.0 + 0.0000e00j,
],
[
0.0121718 - 3.2000e-07j,
0.02703674 - 7.2000e-07j,
0.22464211 - 5.9600e-06j,
0.22464211 - 5.9600e-06j,
-0.02703674 + 7.2000e-07j,
],
[
0.0121718 + 3.2000e-07j,
-0.02703674 - 7.2000e-07j,
0.22464211 + 5.9600e-06j,
0.22464211 + 5.9600e-06j,
0.02703674 + 7.2000e-07j,
],
[
-0.03973803 + 1.9390e-03j,
-0.0 - 0.0000e00j,
0.01986902 - 9.6950e-04j,
0.01986902 - 9.6950e-04j,
0.0 - 0.0000e00j,
],
]
)
coeffs_2D_valid_2 = np.array(
[
[
0.12707831 + 0.0j,
-0.0 + 0.00014827j,
0.0271287 - 0.0j,
0.0271287 + 0.0j,
-0.0 - 0.00014827j,
],
[
0.14675568 - 0.0061323j,
0.0 + 0.0j,
-0.07337784 + 0.00306615j,
-0.07337784 + 0.00306615j,
-0.0 - 0.0j,
],
[
0.12201549 - 0.010611j,
0.10344825 - 0.00899631j,
0.14288853 - 0.01242621j,
0.14288853 - 0.01242621j,
-0.10344825 + 0.00899631j,
],
[
0.12201549 + 0.010611j,
-0.10344825 - 0.00899631j,
0.14288853 + 0.01242621j,
0.14288853 + 0.01242621j,
0.10344825 + 0.00899631j,
],
[
0.14675568 + 0.0061323j,
-0.0 + 0.0j,
-0.07337784 - 0.00306615j,
-0.07337784 - 0.00306615j,
0.0 - 0.0j,
],
]
)
coeffs_2D_valid_list = [coeffs_2D_valid_1, coeffs_2D_valid_2]
coeffs_2D_varying_degrees = np.zeros((5, 9), dtype=complex)
coeffs_2D_varying_degrees[:5, :5] = coeffs_2D_valid_2
coeffs_2D_invalid = np.array(
[
[
0.12707831 + 0.0j,
-0.0 + 0.00014827j,
0.0271287 - 0.0j,
0.0271287 + 0.0j,
-0.0 - 0.00014827j,
],
[
0.14675568 - 0.0061323j,
0.0 + 0.0j,
-0.07337784 + 0.00306615j,
-0.07337784 + 0.00306615j,
-0.0 - 0.0j,
],
[
0.12201549 - 0.010611j,
0.10344825 - 0.00899631j,
0.14288853 - 0.01242621j,
0.14288853 - 0.01242621j,
-0.10344825 + 0.00899631j,
],
[
0.12201549 + 0.010611j,
-0.10344825 - 0.00899631j,
0.14288853 + 0.01242621j,
0.14288853 + 0.01242621j,
0.10344825 + 0.00899631j,
],
]
)
coeffs_3D_valid = np.zeros((5, 5, 5), dtype=complex)
data = {
(0, 0, 1): -0.00882888 - 0.14568055j,
(0, 0, 4): -0.00882888 + 0.14568055j,
(0, 1, 0): 0.38262211 + 0.0j,
(0, 2, 0): -0.0 - 0.03218167j,
(0, 2, 1): 0.00441444 + 0.07284027j,
(0, 2, 4): 0.00441444 - 0.07284027j,
(0, 3, 0): -0.0 + 0.03218167j,
(0, 3, 1): 0.00441444 + 0.07284027j,
(0, 3, 4): 0.00441444 - 0.07284027j,
(0, 4, 0): 0.38262211 + 0.0j,
(2, 0, 1): 0.0019699 - 0.00293059j,
(2, 0, 4): -0.0023094 + 0.00267124j,
(2, 1, 0): 0.00439013 - 0.00574692j,
(2, 2, 0): 0.00047266 - 0.00061874j,
(2, 2, 1): -0.00098495 + 0.00146529j,
(2, 2, 4): 0.0011547 - 0.00133562j,
(2, 3, 0): -0.00047266 + 0.00061874j,
(2, 3, 1): -0.00098495 + 0.00146529j,
(2, 3, 4): 0.0011547 - 0.00133562j,
(2, 4, 0): 0.00439013 - 0.00574692j,
(3, 0, 1): -0.0023094 - 0.00267124j,
(3, 0, 4): 0.0019699 + 0.00293059j,
(3, 1, 0): 0.00439013 + 0.00574692j,
(3, 2, 0): -0.00047266 - 0.00061874j,
(3, 2, 1): 0.0011547 + 0.00133562j,
(3, 2, 4): -0.00098495 - 0.00146529j,
(3, 3, 0): 0.00047266 + 0.00061874j,
(3, 3, 1): 0.0011547 + 0.00133562j,
(3, 3, 4): -0.00098495 - 0.00146529j,
(3, 4, 0): 0.00439013 + 0.00574692j,
}
coeffs_3D_varying_degrees = np.zeros((3, 7, 5), dtype=complex)
for key, val in data.items():
coeffs_3D_valid[key] = val
key = (key[0] - 1 if key[0] > 0 else 0, *key[1:])
coeffs_3D_varying_degrees[key] = val
fig_valid, ax_valid = plt.subplots(2, 1, sharex=True, sharey=True)
fig_invalid, ax_invalid = plt.subplots(3, 1, sharex=True, sharey=True)
fig_radial_valid, ax_radial_valid = plt.subplots(
2, 1, sharex=True, sharey=True, subplot_kw=dict(polar=True)
)
fig_radial_invalid, ax_radial_invalid = plt.subplots(
3, 1, sharex=True, sharey=True, subplot_kw=dict(polar=True)
)
fig_panel_valid, ax_panel_valid = plt.subplots(5, 5, sharex=True, sharey=True)
fig_panel_1d_valid, ax_panel_1d_valid = plt.subplots(5, 1, sharex=True, sharey=True)
fig_panel_invalid, ax_panel_invalid = plt.subplots(3, 2, sharex=True, sharey=True)
class TestValidateCoefficients:
"""Test Fourier coefficients are properly validated/invalidated."""
@pytest.mark.parametrize(
"coeffs,n_inputs,can_be_list,expected_coeffs",
[
(coeffs_1D_valid_1, 1, True, np.array([coeffs_1D_valid_1])),
(coeffs_1D_valid_1, 1, False, np.array(coeffs_1D_valid_1)),
(coeffs_1D_valid_2, 1, True, np.array([coeffs_1D_valid_2])),
(coeffs_1D_valid_2, 1, False, coeffs_1D_valid_2),
(coeffs_2D_valid_1, 2, True, np.array([coeffs_2D_valid_1])),
(coeffs_2D_valid_list, 2, True, np.array(coeffs_2D_valid_list)),
(coeffs_3D_valid, 3, True, np.array([coeffs_3D_valid])),
(coeffs_3D_valid, 3, False, coeffs_3D_valid),
(coeffs_3D_varying_degrees, 3, True, np.array([coeffs_3D_varying_degrees])),
(coeffs_3D_varying_degrees, 3, False, coeffs_3D_varying_degrees),
],
)
def test_valid_fourier_coeffs(self, coeffs, n_inputs, can_be_list, expected_coeffs):
"""Check that valid parameters are properly processed."""
obtained_coeffs = _validate_coefficients(coeffs, n_inputs, can_be_list)
assert np.allclose(obtained_coeffs, expected_coeffs)
def test_incorrect_type_fourier_coeffs(self):
"""Check that invalid type of parameters is caught"""
with pytest.raises(TypeError, match="must be a list of numerical"):
_validate_coefficients("A", True)
@pytest.mark.parametrize(
"coeffs,n_inputs,can_be_list,expected_error_message",
[
(coeffs_1D_invalid, 1, True, "Shape of input coefficients must be 2d"),
(coeffs_1D_valid_1, 2, True, "Plotting function expected a list of"),
(coeffs_2D_invalid, 2, False, "Shape of input coefficients must be 2d_i"),
],
)
def test_invalid_fourier_coeffs(self, coeffs, n_inputs, can_be_list, expected_error_message):
"""Check invalid Fourier coefficient inputs are caught."""
with pytest.raises(ValueError, match=expected_error_message):
_validate_coefficients(coeffs, n_inputs, can_be_list)
class TestInvalidAxesPassing:
"""Test that axes of the incorrect type are not plotted on."""
@pytest.mark.parametrize(
"func,coeffs,n_inputs,ax,expected_error_message",
[
(
violin,
coeffs_1D_valid_1,
1,
ax_invalid,
"Matplotlib axis should consist of two subplots.",
),
(
box,
coeffs_1D_valid_2,
1,
ax_invalid,
"Matplotlib axis should consist of two subplots.",
),
(
bar,
coeffs_1D_valid_1,
1,
ax_invalid,
"Matplotlib axis should consist of two subplots.",
),
(
radial_box,
coeffs_2D_valid_list,
2,
ax_radial_invalid,
"Matplotlib axis should consist of two subplots.",
),
(
radial_box,
coeffs_2D_valid_list,
2,
ax_valid,
"Matplotlib axes for radial_box must be polar.",
),
(
panel,
coeffs_2D_valid_list,
2,
ax_panel_invalid,
"Shape of subplot axes must match the shape of the coefficient data.",
),
],
)
def test_invalid_axes(self, func, coeffs, n_inputs, ax, expected_error_message):
"""Test that invalid axes are not plotted on."""
with pytest.raises(ValueError, match=expected_error_message):
func(coeffs, n_inputs, ax)
class TestReturnType:
"""Test that the functions return an axis date type."""
@pytest.mark.parametrize(
"func,coeffs,n_inputs,ax,show_freqs",
[
(violin, coeffs_1D_valid_1, 1, ax_valid, True),
(violin, coeffs_1D_valid_1, 1, ax_valid, False),
(violin, coeffs_2D_varying_degrees, 2, ax_valid, True),
(box, coeffs_1D_valid_1, 1, ax_valid, True),
(box, coeffs_1D_valid_1, 1, ax_valid, False),
(box, coeffs_3D_valid, 3, ax_valid, True),
(bar, coeffs_1D_valid_1, 1, ax_valid, True),
(bar, coeffs_1D_valid_1, 1, ax_valid, False),
(bar, coeffs_3D_varying_degrees, 3, ax_valid, False),
(radial_box, coeffs_2D_valid_list, 2, ax_radial_valid, True),
(radial_box, coeffs_2D_valid_list, 2, ax_radial_valid, False),
(panel, coeffs_2D_valid_list, 2, ax_panel_valid, None),
(panel, coeffs_1D_valid_list, 1, ax_panel_1d_valid, None),
],
)
def test_correct_return_type(self, func, coeffs, n_inputs, ax, show_freqs):
"""Test that invalid axes are not plotted on."""
if show_freqs is None:
res = func(coeffs, n_inputs, ax)
else:
res = func(coeffs, n_inputs, ax, show_freqs=show_freqs)
assert isinstance(res, type(ax))
def test_panel_n_inputs():
"""Tests that error is raised if n_inputs not 1 or 2."""
with pytest.raises(ValueError, match="Panel plot function accepts input"):
panel(coeffs_1D_valid_list, 3, ax_panel_1d_valid)
|
b6804dd4dd70c00e39d803fb952ddcb6a19d8791
|
160f08e768d7271f9522ad2597ac4ee79c04477a
|
/src/c3nav/mapdata/migrations/0069_mapupdate_geometries_changed.py
|
b229567d4a45339c0422d69d6854c93f0abf654c
|
[
"Apache-2.0"
] |
permissive
|
c3nav/c3nav
|
6254724dfc8589ee03c6028577befd7c65b05857
|
1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7
|
refs/heads/main
| 2023-08-04T08:36:18.431458
| 2023-07-24T09:57:18
| 2023-07-24T09:57:18
| 56,852,994
| 140
| 47
|
Apache-2.0
| 2023-07-05T22:55:27
| 2016-04-22T12:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
0069_mapupdate_geometries_changed.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-23 01:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0068_waytype_level_change_description'),
]
operations = [
migrations.AddField(
model_name='mapupdate',
name='geometries_changed',
field=models.BooleanField(default=True),
preserve_default=False,
),
]
|
585778e07c168f40dc9e4131d6f33421c377d591
|
0f59e486ea9d7c96b8c3f7f92bf063fc8389f1e8
|
/vivisect/symboliks/tests/data_amd64.py
|
27d9125cfe6fac46ab2cd8caa5a469fc6e0eb762
|
[
"Apache-2.0"
] |
permissive
|
vivisect/vivisect
|
ac259918b6281d9431c32a0b2307c61f9cab0dec
|
b07e161cc28b19fdda0d047eefafed22c5b00f15
|
refs/heads/master
| 2023-08-25T09:02:00.526532
| 2023-07-26T03:07:07
| 2023-07-26T03:07:07
| 26,651,759
| 833
| 181
|
Apache-2.0
| 2023-09-07T03:43:53
| 2014-11-14T18:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 6,308
|
py
|
data_amd64.py
|
effects = {
# instruction name -> (bytes, newva, constraints, effects)
'rdtsc': ('0f31', None, (),
('rdx = TSC_HIGH',
'rax = TSC_LOW')
),
'cwde': ('98', None, (),
('rax = signextend((rax & 0x0000ffff), 8)',)
),
'cdq': ('99', None, (),
('rax = signextend((rax & 0x0000ffff), 8)',
'rdx = (signextend((rax & 0x0000ffff), 16) >> 64)')
),
'div16': ('66f7f2', None, (),
('rax = (((((rdx & 0x0000ffff) << 16) | (rax & 0x0000ffff)) / (rdx & 0x0000ffff)) | (rax & 0xffffffffffff0000))',
'rdx = (((((rdx & 0x0000ffff) << 16) | (rax & 0x0000ffff)) % (rdx & 0x0000ffff)) | (rdx & 0xffffffffffff0000))',)
),
'div32': ('f7f1', None, (),
('rax = ((((rdx & 0xffffffff) << 32) | (rax & 0xffffffff)) / (rcx & 0xffffffff))',
'rdx = ((((rdx & 0xffffffff) << 32) | (rax & 0xffffffff)) % (rcx & 0xffffffff))',)
),
'div64': ('49f7f3', None, (),
('rax = (((rdx << 64) | rax) / r11)',
'rdx = (((rdx << 64) | rax) % r11)')
),
'ror': ('C1C90C', None, (),
('rcx = (((rcx & 0xffffffff) >> (12 % 32)) | ((rcx & 0xffffffff) << (32 - (12 % 32))))',)
),
'rol': ('49d3c3', None, (),
('r11 = ((r11 << ((rcx & 255) % 64)) | (r11 >> (64 - ((rcx & 255) % 64))))',)
),
# rcr ax, 1
'rcr': ('66d1d8', None, (),
('rax = (((((eflags_cf << 16) | (rax & 0x0000ffff)) >> (1 % 16)) | (((eflags_cf << 16) | (rax & 0x0000ffff)) << (16 - (1 % 16)))) | (rax & 0xffffffffffff0000))',
'eflags_cf = (((((eflags_cf << 16) | (rax & 0x0000ffff)) >> (1 % 16)) | (((eflags_cf << 16) | (rax & 0x0000ffff)) << (16 - (1 % 16)))) >> (16 - 1))')),
'rcl': ('66d1d0', None, (),
('rax = (((((eflags_cf << 16) | (rax & 0x0000ffff)) << (1 % 16)) | (((eflags_cf << 16) | (rax & 0x0000ffff)) >> (16 - (1 % 16)))) | (rax & 0xffffffffffff0000))',
'eflags_cf = (((((eflags_cf << 16) | (rax & 0x0000ffff)) << (1 % 16)) | (((eflags_cf << 16) | (rax & 0x0000ffff)) >> (16 - (1 % 16)))) >> (16 - 1))')),
# mulx r12, rax, rcx
'mulx': ('C462FBF6E1', None, (),
('r12 = ((rdx * rcx) & 0xffffffffffffffff)',
'rax = ((rdx * rcx) >> 64)')
),
# mul eax
'mul': ('f7e0', None, (),
('rax = (((rax & 0xffffffff) * (rax & 0xffffffff)) & 0xffffffff)',
'rdx = (((rax & 0xffffffff) * (rax & 0xffffffff)) >> 32)',
'eflags_of = ((((rax & 0xffffffff) * (rax & 0xffffffff)) >> 32) != 0)',
'eflags_cf = ((((rax & 0xffffffff) * (rax & 0xffffffff)) >> 32) != 0)')
),
# idiv r11
'idiv': ('49F7Fb', None, (),
('rax = (((rdx << 64) | rax) / r11)',
'rdx = (((rdx << 64) | rax) % r11)',)
),
# bt dword [0x41414141], ebx
'bt': ('0FA31C2541414141', None, (),
('[ (0x41414141 - 0) : 4 ]',
'eflags_cf = ((mem[(0x41414141 - 0):4] >> (rbx & 0xffffffff)) & 1)',)
),
# bts rax, rbx
'bts': ('480FABD8', None, (),
('eflags_cf = ((rax >> rbx) & 1)',
'rax = (rax | (1 << rbx))')),
# btc eax, ebx
'btc': ('0FBBD8', None, (),
('rax = ((rax & 0xffffffff) ^ (1 << (rbx & 0xffffffff)))',
'eflags_cf = (((rax & 0xffffffff) >> (rbx & 0xffffffff)) & 1)')
),
'btr': ('0FBA34254141414111', None, (),
('[ (0x41414141 - 0) : 4 ]', # the read effect
'[ (0x41414141 - 0) : 4 ] = (mem[(0x41414141 - 0):4] & 0xfffdffff)',
'eflags_cf = ((mem[(0x41414141 - 0):4] >> 17) & 1)')
),
'btr_2': ('4C0FB3D8', None, (),
('rax = (rax & (0xffffffffffffffff ^ (1 << r11)))',
'eflags_cf = ((rax >> r11) & 1)')
),
# imul ecx
'imul': ('f7e9', None, (),
('rax = (((rax & 0xffffffff) * (rcx & 0xffffffff)) & 0xffffffff)',
'rdx = (((rax & 0xffffffff) * (rcx & 0xffffffff)) >> 32)',
'eflags_of = (((rax & 0xffffffff) * (rcx & 0xffffffff)) == signextend((rcx & 0xffffffff), 8))',
'eflags_cf = (((rax & 0xffffffff) * (rcx & 0xffffffff)) == signextend((rcx & 0xffffffff), 8))')),
# imul rcx, r11, 0xabcd
'imul 2': ('4969CBCDAB0000', None, (),
('rcx = (r11 * 0x0000abcd)',
'eflags_cf = ((r11 * 0x0000abcd) == signextend(rcx, 16))',
'eflags_of = ((r11 * 0x0000abcd) == signextend(rcx, 16))')
),
# sub esp, 0x200
'sub': ('81EC00020000', None, (),
('rsp = ((rsp & 0xffffffff) - 512)',
'eflags_gt = ((rsp & 0xffffffff) > 512)',
'eflags_lt = ((rsp & 0xffffffff) < 512)',
'eflags_sf = ((rsp & 0xffffffff) < 512)',
'eflags_eq = ((rsp & 0xffffffff) == 512)',
'eflags_of = (((rsp & 0xffffffff) - 512) > 0x00007fff)',
'SKIPeflags_pf = ')
),
# add ax, 0xa3f4
'add': ('6605F4A3', None, (),
('rax = (((rax & 0x0000ffff) + 0x0000a3f4) | (rax & 0xffffffffffff0000))',
'eflags_gt = ((rax & 0x0000ffff) > 0x0000a3f4)',
'eflags_lt = ((rax & 0x0000ffff) < 0x0000a3f4)',
'eflags_sf = (((rax & 0x0000ffff) + 0x0000a3f4) < 0)',
'eflags_eq = (((rax & 0x0000ffff) + 0x0000a3f4) == 0)',
'eflags_of = (((rax & 0x0000ffff) + 0x0000a3f4) > 127)',
'SKIPeflags_pf = ') # TODO: Re-review the parity generator to make sure it's not a mess
),
# div rcx
'div': ('48F7F1', None, (),
('rax = (((rdx << 64) | rax) / rcx)',
'rdx = (((rdx << 64) | rax) % rcx)')
),
# not al
'not': ('f6d0', None, (),
('rax = (((rax & 255) ^ 255) | (rax & 0xffffffffffffff00))',)),
# not ax
'not2': ('66f7d0', None, (),
('rax = (((rax & 0x0000ffff) ^ 0x0000ffff) | (rax & 0xffffffffffff0000))',)),
# not eax
'not3': ('f7d0', None, (),
('rax = ((rax & 0xffffffff) ^ 0xffffffff)',)),
# not rax
'not4': ('48f7d0', None, (),
('rax = (rax ^ 0xffffffffffffffff)',)),
}
|
9383ee610ca89bebae13f2b06361f1b422209438
|
c4f4c28320b660c4312543fb90c7a06a0381ad07
|
/Inventory/admin.py
|
527a7159c1b3eaa6a85f2b2992dddd4adab2dcf5
|
[] |
no_license
|
KinjalDas/Inventory-Management-System-Django
|
3dc7f6c5b059e3a457fc22162fd828518128970d
|
909bd786b8cfa59d1af6c58f2e59d732f3c1b4a2
|
refs/heads/master
| 2022-11-22T03:40:32.876278
| 2022-10-31T07:46:39
| 2022-10-31T07:46:39
| 144,360,465
| 108
| 80
| null | 2022-10-31T07:47:30
| 2018-08-11T06:12:40
|
Python
|
UTF-8
|
Python
| false
| false
| 235
|
py
|
admin.py
|
from django.contrib import admin
# Register your models here.
from .models import Item, Category, Client, Transaction
admin.site.register(Item)
admin.site.register(Client)
admin.site.register(Category)
admin.site.register(Transaction)
|
9b22b529fc7fc51cf4dc55d84fc330c16ffa8670
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/verisure/alarm_control_panel.py
|
26e74cceb9e97e4ba7b3a73698b0b3b455062efb
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,409
|
py
|
alarm_control_panel.py
|
"""Support for Verisure alarm control panels."""
from __future__ import annotations
import asyncio
from homeassistant.components.alarm_control_panel import (
AlarmControlPanelEntity,
AlarmControlPanelEntityFeature,
CodeFormat,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_ALARM_ARMING, STATE_ALARM_DISARMING
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ALARM_STATE_TO_HA, CONF_GIID, DOMAIN, LOGGER
from .coordinator import VerisureDataUpdateCoordinator
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Verisure alarm control panel from a config entry."""
async_add_entities([VerisureAlarm(coordinator=hass.data[DOMAIN][entry.entry_id])])
class VerisureAlarm(
CoordinatorEntity[VerisureDataUpdateCoordinator], AlarmControlPanelEntity
):
"""Representation of a Verisure alarm status."""
_attr_code_format = CodeFormat.NUMBER
_attr_has_entity_name = True
_attr_name = None
_attr_supported_features = (
AlarmControlPanelEntityFeature.ARM_HOME
| AlarmControlPanelEntityFeature.ARM_AWAY
)
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this entity."""
return DeviceInfo(
name="Verisure Alarm",
manufacturer="Verisure",
model="VBox",
identifiers={(DOMAIN, self.coordinator.entry.data[CONF_GIID])},
configuration_url="https://mypages.verisure.com",
)
@property
def unique_id(self) -> str:
"""Return the unique ID for this entity."""
return self.coordinator.entry.data[CONF_GIID]
async def _async_set_arm_state(
self, state: str, command_data: dict[str, str | dict[str, str]]
) -> None:
"""Send set arm state command."""
arm_state = await self.hass.async_add_executor_job(
self.coordinator.verisure.request, command_data
)
LOGGER.debug("Verisure set arm state %s", state)
result = None
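# Poll the arm-state change transaction until the Verisure backend reports a
# result, then refresh the coordinator so the new alarm state is picked up.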
while result is None:
await asyncio.sleep(0.5)
transaction = await self.hass.async_add_executor_job(
self.coordinator.verisure.request,
self.coordinator.verisure.poll_arm_state(
list(arm_state["data"].values())[0], state
),
)
result = (
transaction.get("data", {})
.get("installation", {})
.get("armStateChangePollResult", {})
.get("result")
)
await self.coordinator.async_refresh()
async def async_alarm_disarm(self, code: str | None = None) -> None:
"""Send disarm command."""
self._attr_state = STATE_ALARM_DISARMING
self.async_write_ha_state()
await self._async_set_arm_state(
"DISARMED", self.coordinator.verisure.disarm(code)
)
async def async_alarm_arm_home(self, code: str | None = None) -> None:
"""Send arm home command."""
self._attr_state = STATE_ALARM_ARMING
self.async_write_ha_state()
await self._async_set_arm_state(
"ARMED_HOME", self.coordinator.verisure.arm_home(code)
)
async def async_alarm_arm_away(self, code: str | None = None) -> None:
"""Send arm away command."""
self._attr_state = STATE_ALARM_ARMING
self.async_write_ha_state()
await self._async_set_arm_state(
"ARMED_AWAY", self.coordinator.verisure.arm_away(code)
)
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._attr_state = ALARM_STATE_TO_HA.get(
self.coordinator.data["alarm"]["statusType"]
)
self._attr_changed_by = self.coordinator.data["alarm"].get("name")
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
|
90dab5ca550d51d7c1154b59353d0654362064d5
|
4d28185e7a78a569f9a449f39f183cac3024f711
|
/packages/Python/lldbsuite/test_event/event_builder.py
|
8759011abe5ef0e3b74adcee9db6639c3bfb4ecf
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
apple/swift-lldb
|
2789bf44f648609a1674ee520ac20b64c95de072
|
d74be846ef3e62de946df343e8c234bde93a8912
|
refs/heads/stable
| 2023-04-06T00:28:15.882479
| 2019-10-25T22:46:59
| 2019-10-25T22:46:59
| 44,838,862
| 780
| 291
|
Apache-2.0
| 2020-01-10T19:28:43
| 2015-10-23T21:13:18
|
C++
|
UTF-8
|
Python
| false
| false
| 17,162
|
py
|
event_builder.py
|
"""
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Provides a class to build Python test event data structures.
"""
from __future__ import print_function
from __future__ import absolute_import
# System modules
import inspect
import time
import traceback
# Third-party modules
# LLDB modules
from . import build_exception
class EventBuilder(object):
"""Helper class to build test result event dictionaries."""
BASE_DICTIONARY = None
# Test Event Types
TYPE_JOB_RESULT = "job_result"
TYPE_TEST_RESULT = "test_result"
TYPE_TEST_START = "test_start"
TYPE_MARK_TEST_RERUN_ELIGIBLE = "test_eligible_for_rerun"
TYPE_MARK_TEST_EXPECTED_FAILURE = "test_expected_failure"
TYPE_SESSION_TERMINATE = "terminate"
RESULT_TYPES = {TYPE_JOB_RESULT, TYPE_TEST_RESULT}
# Test/Job Status Tags
STATUS_EXCEPTIONAL_EXIT = "exceptional_exit"
STATUS_SUCCESS = "success"
STATUS_FAILURE = "failure"
STATUS_EXPECTED_FAILURE = "expected_failure"
STATUS_EXPECTED_TIMEOUT = "expected_timeout"
STATUS_UNEXPECTED_SUCCESS = "unexpected_success"
STATUS_SKIP = "skip"
STATUS_ERROR = "error"
STATUS_TIMEOUT = "timeout"
"""Test methods or jobs with a status matching any of these
status values will cause a testrun failure, unless
the test methods are rerun and do not trigger an issue on the rerun."""
TESTRUN_ERROR_STATUS_VALUES = {
STATUS_ERROR,
STATUS_EXCEPTIONAL_EXIT,
STATUS_FAILURE,
STATUS_TIMEOUT}
@staticmethod
def _get_test_name_info(test):
"""Returns (test-class-name, test-method-name) from a test case instance.
@param test a unittest.TestCase instance.
@return tuple containing (test class name, test method name)
"""
test_class_components = test.id().split(".")
test_class_name = ".".join(test_class_components[:-1])
test_name = test_class_components[-1]
return test_class_name, test_name
@staticmethod
def bare_event(event_type):
"""Creates an event with default additions, event type and timestamp.
@param event_type the value set for the "event" key, used
to distinguish events.
@returns an event dictionary with all default additions, the "event"
key set to the passed in event_type, and the event_time value set to
time.time().
"""
if EventBuilder.BASE_DICTIONARY is not None:
# Start with a copy of the "always include" entries.
event = dict(EventBuilder.BASE_DICTIONARY)
else:
event = {}
event.update({
"event": event_type,
"event_time": time.time()
})
return event
@staticmethod
def _assert_is_python_sourcefile(test_filename):
if test_filename is not None:
if not test_filename.endswith(".py"):
raise Exception(
"source python filename has unexpected extension: {}".format(test_filename))
return test_filename
@staticmethod
def _event_dictionary_common(test, event_type):
"""Returns an event dictionary setup with values for the given event type.
@param test the unittest.TestCase instance
@param event_type the name of the event type (string).
@return event dictionary with common event fields set.
"""
test_class_name, test_name = EventBuilder._get_test_name_info(test)
# Determine the filename for the test case. If there is an attribute
# for it, use it. Otherwise, determine from the TestCase class path.
if hasattr(test, "test_filename"):
test_filename = EventBuilder._assert_is_python_sourcefile(
test.test_filename)
else:
test_filename = EventBuilder._assert_is_python_sourcefile(
inspect.getsourcefile(test.__class__))
event = EventBuilder.bare_event(event_type)
event.update({
"test_class": test_class_name,
"test_name": test_name,
"test_filename": test_filename
})
return event
@staticmethod
def _error_tuple_class(error_tuple):
"""Returns the unittest error tuple's error class as a string.
@param error_tuple the error tuple provided by the test framework.
@return the error type (typically an exception) raised by the
test framework.
"""
type_var = error_tuple[0]
module = inspect.getmodule(type_var)
if module:
return "{}.{}".format(module.__name__, type_var.__name__)
else:
return type_var.__name__
@staticmethod
def _error_tuple_message(error_tuple):
"""Returns the unittest error tuple's error message.
@param error_tuple the error tuple provided by the test framework.
@return the error message provided by the test framework.
"""
return str(error_tuple[1])
@staticmethod
def _error_tuple_traceback(error_tuple):
"""Returns the unittest error tuple's error message.
@param error_tuple the error tuple provided by the test framework.
@return the traceback object provided by the test framework.
"""
return error_tuple[2]
@staticmethod
def _event_dictionary_test_result(test, status):
"""Returns an event dictionary with common test result fields set.
@param test a unittest.TestCase instance.
@param status the status/result of the test
(e.g. "success", "failure", etc.)
@return the event dictionary
"""
event = EventBuilder._event_dictionary_common(
test, EventBuilder.TYPE_TEST_RESULT)
event["status"] = status
return event
@staticmethod
def _event_dictionary_issue(test, status, error_tuple):
"""Returns an event dictionary with common issue-containing test result
fields set.
@param test a unittest.TestCase instance.
@param status the status/result of the test
(e.g. "success", "failure", etc.)
@param error_tuple the error tuple as reported by the test runner.
This is of the form (type<error>, error).
@return the event dictionary
"""
event = EventBuilder._event_dictionary_test_result(test, status)
event["issue_class"] = EventBuilder._error_tuple_class(error_tuple)
event["issue_message"] = EventBuilder._error_tuple_message(error_tuple)
backtrace = EventBuilder._error_tuple_traceback(error_tuple)
if backtrace is not None:
event["issue_backtrace"] = traceback.format_tb(backtrace)
return event
@staticmethod
def event_for_start(test):
"""Returns an event dictionary for the test start event.
@param test a unittest.TestCase instance.
@return the event dictionary
"""
return EventBuilder._event_dictionary_common(
test, EventBuilder.TYPE_TEST_START)
@staticmethod
def event_for_success(test):
"""Returns an event dictionary for a successful test.
@param test a unittest.TestCase instance.
@return the event dictionary
"""
return EventBuilder._event_dictionary_test_result(
test, EventBuilder.STATUS_SUCCESS)
@staticmethod
def event_for_unexpected_success(test, bugnumber):
"""Returns an event dictionary for a test that succeeded but was
expected to fail.
@param test a unittest.TestCase instance.
@param bugnumber the issue identifier for the bug tracking the
fix request for the test expected to fail (but is in fact
passing here).
@return the event dictionary
"""
event = EventBuilder._event_dictionary_test_result(
test, EventBuilder.STATUS_UNEXPECTED_SUCCESS)
if bugnumber:
event["bugnumber"] = str(bugnumber)
return event
@staticmethod
def event_for_failure(test, error_tuple):
"""Returns an event dictionary for a test that failed.
@param test a unittest.TestCase instance.
@param error_tuple the error tuple as reported by the test runner.
This is of the form (type<error>, error).
@return the event dictionary
"""
return EventBuilder._event_dictionary_issue(
test, EventBuilder.STATUS_FAILURE, error_tuple)
@staticmethod
def event_for_expected_failure(test, error_tuple, bugnumber):
"""Returns an event dictionary for a test that failed as expected.
@param test a unittest.TestCase instance.
@param error_tuple the error tuple as reported by the test runner.
This is of the form (type<error>, error).
@param bugnumber the issue identifier for the bug tracking the
fix request for the test expected to fail.
@return the event dictionary
"""
event = EventBuilder._event_dictionary_issue(
test, EventBuilder.STATUS_EXPECTED_FAILURE, error_tuple)
if bugnumber:
event["bugnumber"] = str(bugnumber)
return event
@staticmethod
def event_for_skip(test, reason):
"""Returns an event dictionary for a test that was skipped.
@param test a unittest.TestCase instance.
@param reason the reason why the test is being skipped.
@return the event dictionary
"""
event = EventBuilder._event_dictionary_test_result(
test, EventBuilder.STATUS_SKIP)
event["skip_reason"] = reason
return event
@staticmethod
def event_for_error(test, error_tuple):
"""Returns an event dictionary for a test that hit a test execution error.
@param test a unittest.TestCase instance.
@param error_tuple the error tuple as reported by the test runner.
This is of the form (type<error>, error).
@return the event dictionary
"""
event = EventBuilder._event_dictionary_issue(
test, EventBuilder.STATUS_ERROR, error_tuple)
event["issue_phase"] = "test"
return event
@staticmethod
def event_for_build_error(test, error_tuple):
"""Returns an event dictionary for a test that hit a test execution error
during the build phase.
@param test a unittest.TestCase instance.
@param error_tuple the error tuple as reported by the test runner.
This is of the form (type<error>, error).
@return the event dictionary
"""
event = EventBuilder._event_dictionary_issue(
test, EventBuilder.STATUS_ERROR, error_tuple)
event["issue_phase"] = "build"
build_error = error_tuple[1]
event["build_command"] = build_error.command
event["build_error"] = build_error.build_error
return event
@staticmethod
def event_for_cleanup_error(test, error_tuple):
"""Returns an event dictionary for a test that hit a test execution error
during the test cleanup phase.
@param test a unittest.TestCase instance.
@param error_tuple the error tuple as reported by the test runner.
This is of the form (type<error>, error).
@return the event dictionary
"""
event = EventBuilder._event_dictionary_issue(
test, EventBuilder.STATUS_ERROR, error_tuple)
event["issue_phase"] = "cleanup"
return event
@staticmethod
def event_for_job_test_add_error(test_filename, exception, backtrace):
event = EventBuilder.bare_event(EventBuilder.TYPE_JOB_RESULT)
event["status"] = EventBuilder.STATUS_ERROR
if test_filename is not None:
event["test_filename"] = EventBuilder._assert_is_python_sourcefile(
test_filename)
if exception is not None and "__class__" in dir(exception):
event["issue_class"] = exception.__class__
event["issue_message"] = exception
if backtrace is not None:
event["issue_backtrace"] = backtrace
return event
@staticmethod
def event_for_job_exceptional_exit(
pid, worker_index, exception_code, exception_description,
test_filename, command_line):
"""Creates an event for a job (i.e. process) exit due to signal.
@param pid the process id for the job that failed
@param worker_index optional id for the job queue running the process
@param exception_code optional code
(e.g. SIGTERM integer signal number)
@param exception_description optional string containing symbolic
representation of the issue (e.g. "SIGTERM")
@param test_filename the path to the test filename that exited
in some exceptional way.
@param command_line the Popen()-style list provided as the command line
for the process that timed out.
@return an event dictionary coding the job completion description.
"""
event = EventBuilder.bare_event(EventBuilder.TYPE_JOB_RESULT)
event["status"] = EventBuilder.STATUS_EXCEPTIONAL_EXIT
if pid is not None:
event["pid"] = pid
if worker_index is not None:
event["worker_index"] = int(worker_index)
if exception_code is not None:
event["exception_code"] = exception_code
if exception_description is not None:
event["exception_description"] = exception_description
if test_filename is not None:
event["test_filename"] = EventBuilder._assert_is_python_sourcefile(
test_filename)
if command_line is not None:
event["command_line"] = command_line
return event
@staticmethod
def event_for_job_timeout(pid, worker_index, test_filename, command_line):
"""Creates an event for a job (i.e. process) timeout.
@param pid the process id for the job that timed out
@param worker_index optional id for the job queue running the process
@param test_filename the path to the test filename that timed out.
@param command_line the Popen-style list provided as the command line
for the process that timed out.
@return an event dictionary coding the job completion description.
"""
event = EventBuilder.bare_event(EventBuilder.TYPE_JOB_RESULT)
event["status"] = "timeout"
if pid is not None:
event["pid"] = pid
if worker_index is not None:
event["worker_index"] = int(worker_index)
if test_filename is not None:
event["test_filename"] = EventBuilder._assert_is_python_sourcefile(
test_filename)
if command_line is not None:
event["command_line"] = command_line
return event
@staticmethod
def event_for_mark_test_rerun_eligible(test):
"""Creates an event that indicates the specified test is explicitly
eligible for rerun.
Note there is a mode that will enable test rerun eligibility at the
global level. These markings for explicit rerun eligibility are
intended for the mode of running where only explicitly re-runnable
tests are rerun upon hitting an issue.
@param test the TestCase instance to which this pertains.
@return an event that specifies the given test as being eligible to
be rerun.
"""
event = EventBuilder._event_dictionary_common(
test,
EventBuilder.TYPE_MARK_TEST_RERUN_ELIGIBLE)
return event
@staticmethod
def event_for_mark_test_expected_failure(test):
"""Creates an event that indicates the specified test is expected
to fail.
@param test the TestCase instance to which this pertains.
@return an event that specifies the given test is expected to fail.
"""
event = EventBuilder._event_dictionary_common(
test,
EventBuilder.TYPE_MARK_TEST_EXPECTED_FAILURE)
return event
@staticmethod
def add_entries_to_all_events(entries_dict):
"""Specifies a dictionary of entries to add to all test events.
This provides a mechanism for, say, a parallel test runner to
indicate to each inferior dotest.py that it should add a
worker index to each event it generates.
Calling this method replaces all previous entries added
by a prior call to this.
Event build methods will overwrite any entries that collide.
Thus, the passed in dictionary is the base, which gets merged
over by event building when keys collide.
@param entries_dict a dictionary containing key and value
pairs that should be merged into all events created by the
event generator. May be None to clear out any extra entries.
"""
EventBuilder.BASE_DICTIONARY = dict(entries_dict) if entries_dict is not None else None
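# A minimal usage sketch (assumed caller code, not part of the framework above):
#
#   EventBuilder.add_entries_to_all_events({"worker_index": 3})
#   event = EventBuilder.bare_event(EventBuilder.TYPE_SESSION_TERMINATE)
#   # "worker_index" now appears alongside "event" and "event_time" in the event.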
|
1b825f3fb0611890fbab760f5a20c2655b5e0705
|
391dfd77c1bb85c08b4ead451ecdab0858eb141f
|
/examples/gltf_scenes.py
|
5d7b33cf413045787282116a981825a90edc3892
|
[
"MIT"
] |
permissive
|
moderngl/moderngl-window
|
308682b5aa625dbb49ca554459bed9853a5e69c3
|
200f2b9ea8b350b0ac9bb6a2d24310c0d8227794
|
refs/heads/master
| 2023-05-28T00:33:49.924394
| 2023-05-18T11:06:26
| 2023-05-18T11:06:26
| 172,498,670
| 205
| 48
|
MIT
| 2023-09-01T17:45:51
| 2019-02-25T12:05:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,860
|
py
|
gltf_scenes.py
|
from pathlib import Path
from pyrr import Matrix44
import moderngl
import moderngl_window as mglw
from moderngl_window.scene.camera import KeyboardCamera
from base import CameraWindow
class CubeModel(CameraWindow):
"""
In order for this example to work you need to clone the gltf
model samples repository and ensure resource_dir is set correctly:
https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0
"""
title = 'GL Transmission Format (glTF) 2.0 Scene'
window_size = 1280, 720
aspect_ratio = None
resource_dir = Path(__file__, '../../../glTF-Sample-Models/2.0').resolve()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.wnd.mouse_exclusivity = True
# --- glTF-Sample-Models ---
# self.scene = self.load_scene('2CylinderEngine/glTF-Binary/2CylinderEngine.glb')
# self.scene = self.load_scene('CesiumMilkTruck/glTF-Embedded/CesiumMilkTruck.gltf')
# self.scene = self.load_scene('CesiumMilkTruck/glTF-Binary/CesiumMilkTruck.glb')
# self.scene = self.load_scene('CesiumMilkTruck/glTF/CesiumMilkTruck.gltf')
self.scene = self.load_scene('Sponza/glTF/Sponza.gltf')
# self.scene = self.load_scene('Lantern/glTF-Binary/Lantern.glb')
# self.scene = self.load_scene('Buggy/glTF-Binary/Buggy.glb')
# self.scene = self.load_scene('VC/glTF-Binary/VC.glb')
# self.scene = self.load_scene('DamagedHelmet/glTF-Binary/DamagedHelmet.glb')
# self.scene = self.load_scene('BoxInterleaved/glTF/BoxInterleaved.gltf')
# self.scene = self.load_scene('OrientationTest/glTF/OrientationTest.gltf')
# self.scene = self.load_scene('AntiqueCamera/glTF/AntiqueCamera.gltf')
# self.scene = self.load_scene('BoomBox/glTF/BoomBox.gltf')
# self.scene = self.load_scene('Box/glTF/Box.gltf')
# self.scene = self.load_scene('BoxTextured/glTF/BoxTextured.gltf')
# self.scene = self.load_scene('BoxTexturedNonPowerOfTwo/glTF/BoxTexturedNonPowerOfTwo.gltf')
# self.scene = self.load_scene('BoxVertexColors/glTF/BoxVertexColors.gltf')
# self.scene = self.load_scene('BrainStem/glTF/BrainStem.gltf')
# self.scene = self.load_scene('Corset/glTF/Corset.gltf')
# self.scene = self.load_scene('FlightHelmet/glTF/FlightHelmet.gltf')
# self.scene = self.load_scene('Fox/glTF/Fox.gltf')
# self.scene = self.load_scene('GearboxAssy/glTF/GearboxAssy.gltf')
# self.scene = self.load_scene('ReciprocatingSaw/glTF/ReciprocatingSaw.gltf')
# self.scene = self.load_scene('RiggedFigure/glTF/RiggedFigure.gltf')
# self.scene = self.load_scene('RiggedSimple/glTF/RiggedSimple.gltf')
# self.scene = self.load_scene('SciFiHelmet/glTF/SciFiHelmet.gltf')
# self.scene = self.load_scene('SimpleMeshes/glTF/SimpleMeshes.gltf')
# self.scene = self.load_scene('SimpleSparseAccessor/glTF/SimpleSparseAccessor.gltf')
# self.scene = self.load_scene('Suzanne/glTF/Suzanne.gltf')
# self.scene = self.load_scene('TextureCoordinateTest/glTF/TextureCoordinateTest.gltf')
# self.scene = self.load_scene('TextureSettingsTest/glTF/TextureSettingsTest.gltf')
# self.scene = self.load_scene('VertexColorTest/glTF/VertexColorTest.gltf')
# self.scene = self.load_scene('WaterBottle/glTF/WaterBottle.gltf')
self.camera = KeyboardCamera(self.wnd.keys, fov=75.0, aspect_ratio=self.wnd.aspect_ratio, near=0.1, far=1000.0)
# self.camera.velocity = 7.0
# self.camera.mouse_sensitivity = 0.3
# Use this for gltf scenes for better camera controls
if self.scene.diagonal_size > 0:
self.camera.velocity = self.scene.diagonal_size / 5.0
def render(self, time: float, frame_time: float):
"""Render the scene"""
self.ctx.enable_only(moderngl.DEPTH_TEST | moderngl.CULL_FACE)
# Move camera in on the z axis slightly by default
translation = Matrix44.from_translation((0, 0, -1.5), dtype='f4')
camera_matrix = self.camera.matrix * translation
self.scene.draw(
projection_matrix=self.camera.projection.matrix,
camera_matrix=camera_matrix,
time=time,
)
# Draw bounding boxes
self.scene.draw_bbox(
projection_matrix=self.camera.projection.matrix,
camera_matrix=camera_matrix,
children=True,
color=(0.75, 0.75, 0.75),
)
# self.scene.draw_wireframe(
# projection_matrix=self.camera.projection.matrix,
# camera_matrix=camera_matrix,
# color=(1, 1, 1, 1),
# )
if __name__ == '__main__':
mglw.run_window_config(CubeModel)
|
2c513d762efcaa5154ca11af3a3a1cd6ad9983e5
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/st/ops/test_one_hot.py
|
f6118387b7eb396b95eb5be10ab1c22a16df4505
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,633
|
py
|
test_one_hot.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import akg.utils as utils
from tests.common.base import TestBase
from tests.common.test_run import one_hot_run
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_one_hot_001"
case_path = os.getcwd()
# params init
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# caseflag,opfuncname,testRunArgs, dimArgs
("test_one_hot1", one_hot_run, ((8, 8732), 6, "float16", 1, 0, -1)),
("test_one_hot1", one_hot_run, ((8, 8732), 6, "float32", 1, 0, -1)),
("test_one_hot1", one_hot_run, ((8,), 6, "float32", 1, 0, -1)),
("test_one_hot1", one_hot_run, ((16,), 16, "float32", 1, 0, -1)),
("test_one_hot2", one_hot_run, ((123,), 128, "int32", 1, 0, -1)),
("test_one_hot3", one_hot_run, ((1024,), 16, "int32", 1, 0, -1)),
("test_one_hot4", one_hot_run, ((160,), 160, "int32", 1, 0, -1)),
("test_one_hot5", one_hot_run, ((1280,), 16, "int32", 1, 0, -1)),
("test_one_hot6", one_hot_run, ((8,), 8, "int32", 1, 0, -1)),
("test_one_hot7", one_hot_run, ((64,), 64, "int32", 1, 0, -1)),
("test_one_hot8", one_hot_run, ((8192,), 64, "int32", 1, 0, -1)),
("test_one_hot9", one_hot_run, ((1024,), 16, "int32", 1, 0, 0)),
#("test_one_hot10", one_hot_run, ((1052676,), 21, "int32", 1, 0, -1)),
("test_one_hot10", one_hot_run, ((1,), 32000, "float32", 1, 0, -1)),
# ("test_one_hot10", one_hot_run,((1024, 16), 16,"int32", 1, 0, 0)),
]
self.testarg_rpc_cloud = [
# int32 - int32 - float - float:[160] - [] - [] - [] = float:[160, 30522]
("test_one_hot_001", one_hot_run, ((160,), 30522, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[8192] - [] - [] - [] = float:[8192, 2]
("test_one_hot_002", one_hot_run, ((8192,), 2, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[1024] - [] - [] - [] = float:[1024, 2]
("test_one_hot_003", one_hot_run, ((1024,), 2, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[1280] - [] - [] - [] = float:[1280, 30522]
("test_one_hot_004", one_hot_run, ((1280,), 30522, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[8] - [] - [] - [] = float:[8, 2]
("test_one_hot_005", one_hot_run, ((8,), 2, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[64] - [] - [] - [] = float:[64, 2]
("test_one_hot_006", one_hot_run, ((64,), 2, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[8192] - [] - [] - [] = float:[8192, 21128]
("test_one_hot_007", one_hot_run, ((8192,), 21128, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[8192] - [] - [] - [] = float:[8192, 2]
("test_one_hot_008", one_hot_run, ((8192,), 2, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[1280] - [] - [] - [] = float:[1280, 21128]
("test_one_hot_009", one_hot_run, ((1280,), 21128, "int32", 1, 0, -1)),
# int32 - int32 - float - float:[64] - [] - [] - [] = float:[64, 2]
("test_one_hot_010", one_hot_run, ((64,), 2, "int32", 1, 0, -1)),
("test_one_hot_011", one_hot_run, ((160,), 21128, "float32", 1, 0, -1)),
]
self.args_default = [
("000_case", one_hot_run, ((1024,), 16, "int32", 1, 0, 0), ["level0"]),
("001_case", one_hot_run, ((1024,), 16, "float32", 1, 0, 0), ["level0"]),
("001_case", one_hot_run, ((32,), 16, "int32", 1, 0, 0), ["level0"]),
("001_case", one_hot_run, ((32,), 16, "float32", 1, 0, 0), ["level0"]),
]
return True
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_level0(self):
return self.run_cases(self.args_default, utils.CUDA, "level0")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_level0(self):
return self.run_cases(self.args_default, utils.LLVM, "level0")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
"""
        run case.
:return:
"""
self.common_run(self.testarg)
def test_run_rpc_cloud(self):
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= {0} Teardown============".format(self.casename))
return
if __name__ == "__main__":
t = TestCase()
t.setup()
t.test_run()
t.teardown()
|
dc13d8345bbdcd3047a47ce778d81aab8a855d7c
|
0d40af0c10fd47af5ea88675e5be3b7ee98cbcd8
|
/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_touch.py
|
c1c430765f7dfded519931c0fabd290886608b48
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
mozilla/gecko-dev
|
f02cdc9e61197c1d2a038cf1ad8cb5339e8cb62a
|
304e9eaa3d7dbfab38d5b5401cbae227b20efe37
|
refs/heads/master
| 2023-09-01T20:40:41.588946
| 2023-09-01T11:38:17
| 2023-09-01T11:38:17
| 13,509,108
| 3,025
| 1,882
|
NOASSERTION
| 2023-09-05T04:11:13
| 2013-10-11T20:35:42
| null |
UTF-8
|
Python
| false
| false
| 6,264
|
py
|
pointer_touch.py
|
import pytest
from webdriver.bidi.modules.input import Actions, get_element_origin
from webdriver.bidi.modules.script import ContextTarget
from .. import get_events
from . import (
assert_pointer_events,
get_inview_center_bidi,
get_shadow_root_from_test_page,
record_pointer_events,
)
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["open", "closed"])
@pytest.mark.parametrize("nested", [False, True], ids=["outer", "inner"])
async def test_touch_pointer_in_shadow_tree(
bidi_session, top_context, get_test_page, mode, nested
):
await bidi_session.browsing_context.navigate(
context=top_context["context"],
url=get_test_page(
shadow_doc="""
<div id="pointer-target"
style="width: 10px; height: 10px; background-color:blue;">
</div>""",
shadow_root_mode=mode,
nested_shadow_dom=nested,
),
wait="complete",
)
shadow_root = await get_shadow_root_from_test_page(
bidi_session, top_context, nested
)
# Add a simplified event recorder to track events in the test ShadowRoot.
target = await record_pointer_events(
bidi_session, top_context, shadow_root, "#pointer-target"
)
actions = Actions()
(
actions.add_pointer(pointer_type="touch")
.pointer_move(x=0, y=0, origin=get_element_origin(target))
.pointer_down(button=0)
.pointer_up(button=0)
)
await bidi_session.input.perform_actions(
actions=actions, context=top_context["context"]
)
await assert_pointer_events(
bidi_session,
top_context,
expected_events=["pointerdown", "pointerup"],
target="pointer-target",
pointer_type="touch",
)
async def test_touch_pointer_properties(
bidi_session, top_context, get_element, load_static_test_page
):
await load_static_test_page(page="test_actions_pointer.html")
pointerArea = await get_element("#pointerArea")
center = await get_inview_center_bidi(
bidi_session, context=top_context, element=pointerArea
)
actions = Actions()
(
actions.add_pointer(pointer_type="touch")
.pointer_move(x=0, y=0, origin=get_element_origin(pointerArea))
.pointer_down(
button=0,
width=23,
height=31,
pressure=0.78,
tilt_x=21,
tilt_y=-8,
twist=355,
)
.pointer_move(
x=10,
y=10,
origin=get_element_origin(pointerArea),
width=39,
height=35,
pressure=0.91,
tilt_x=-19,
tilt_y=62,
twist=345,
)
.pointer_up(button=0)
.pointer_move(x=80, y=50, origin=get_element_origin(pointerArea))
)
await bidi_session.input.perform_actions(
actions=actions, context=top_context["context"]
)
events = await get_events(bidi_session, top_context["context"])
assert len(events) == 7
event_types = [e["type"] for e in events]
assert [
"pointerover",
"pointerenter",
"pointerdown",
"pointermove",
"pointerup",
"pointerout",
"pointerleave",
] == event_types
assert events[2]["type"] == "pointerdown"
assert events[2]["pageX"] == pytest.approx(center["x"], abs=1.0)
assert events[2]["pageY"] == pytest.approx(center["y"], abs=1.0)
assert events[2]["target"] == "pointerArea"
assert events[2]["pointerType"] == "touch"
assert round(events[2]["width"], 2) == 23
assert round(events[2]["height"], 2) == 31
assert round(events[2]["pressure"], 2) == 0.78
assert events[3]["type"] == "pointermove"
assert events[3]["pageX"] == pytest.approx(center["x"] + 10, abs=1.0)
assert events[3]["pageY"] == pytest.approx(center["y"] + 10, abs=1.0)
assert events[3]["target"] == "pointerArea"
assert events[3]["pointerType"] == "touch"
assert round(events[3]["width"], 2) == 39
assert round(events[3]["height"], 2) == 35
assert round(events[3]["pressure"], 2) == 0.91
async def test_touch_pointer_properties_tilt_twist(
bidi_session, top_context, get_element, load_static_test_page
):
# This test only covers the tilt/twist properties which are
# more specific to pen-type pointers, but which the spec allows
    # for generic touch pointers. Separating this out gives better
# coverage of the basic properties in test_touch_pointer_properties
await load_static_test_page(page="test_actions_pointer.html")
pointerArea = await get_element("#pointerArea")
center = await get_inview_center_bidi(
bidi_session, context=top_context, element=pointerArea
)
actions = Actions()
(
actions.add_pointer(pointer_type="touch")
.pointer_move(x=0, y=0, origin=get_element_origin(pointerArea))
.pointer_down(
button=0,
width=23,
height=31,
pressure=0.78,
tilt_x=21,
tilt_y=-8,
twist=355,
)
.pointer_move(
x=10,
y=10,
origin=get_element_origin(pointerArea),
width=39,
height=35,
pressure=0.91,
tilt_x=-19,
tilt_y=62,
twist=345,
)
.pointer_up(button=0)
.pointer_move(x=80, y=50, origin=get_element_origin(pointerArea))
)
await bidi_session.input.perform_actions(
actions=actions, context=top_context["context"]
)
events = await get_events(bidi_session, top_context["context"])
assert len(events) == 7
event_types = [e["type"] for e in events]
assert [
"pointerover",
"pointerenter",
"pointerdown",
"pointermove",
"pointerup",
"pointerout",
"pointerleave",
] == event_types
assert events[2]["type"] == "pointerdown"
assert events[2]["tiltX"] == 21
assert events[2]["tiltY"] == -8
assert events[2]["twist"] == 355
assert events[3]["type"] == "pointermove"
assert events[3]["tiltX"] == -19
assert events[3]["tiltY"] == 62
assert events[3]["twist"] == 345
|
f84cab6085f5365aeb01578ce3980e5664302102
|
b7314f9480634b2f2998c8181d4284d2b52ebba1
|
/src/python/txtai/app/base.py
|
61d8a6595e652bf94144b6e12209e67286c9346f
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
neuml/txtai
|
3ca6fba11126d650ea4f2cf5199011a52ea56e4e
|
789a4555cb60ee9cdfa69afae5a5236d197e2b07
|
refs/heads/master
| 2023-08-31T08:09:31.834178
| 2023-08-29T15:36:23
| 2023-08-29T15:36:23
| 286,301,447
| 4,804
| 387
|
Apache-2.0
| 2023-09-11T17:12:40
| 2020-08-09T19:14:59
|
Python
|
UTF-8
|
Python
| false
| false
| 20,925
|
py
|
base.py
|
"""
Application module
"""
import os
from multiprocessing.pool import ThreadPool
from threading import RLock
import yaml
from ..embeddings import Documents, Embeddings
from ..pipeline import PipelineFactory
from ..workflow import WorkflowFactory
# pylint: disable=R0904
class Application:
"""
Builds YAML-configured txtai applications.
"""
@staticmethod
def read(data):
"""
Reads a YAML configuration file.
Args:
data: input data
Returns:
yaml
"""
if isinstance(data, str):
if os.path.exists(data):
# Read yaml from file
with open(data, "r", encoding="utf-8") as f:
# Read configuration
return yaml.safe_load(f)
# Attempt to read yaml from input
data = yaml.safe_load(data)
if not isinstance(data, str):
return data
# File not found and input is not yaml, raise error
raise FileNotFoundError(f"Unable to load file '{data}'")
# Return unmodified
return data
def __init__(self, config, loaddata=True):
"""
Creates an Application instance, which encapsulates embeddings, pipelines and workflows.
Args:
config: index configuration
loaddata: If True (default), load existing index data, if available. Otherwise, only load models.
"""
# Initialize member variables
self.config, self.documents, self.embeddings = Application.read(config), None, None
# Write lock - allows only a single thread to update embeddings
self.lock = RLock()
# ThreadPool - runs scheduled workflows
self.pool = None
# Create pipelines
self.pipes()
# Create workflows
self.flows()
# Create embeddings index
self.indexes(loaddata)
def __del__(self):
"""
Close threadpool when this object is garbage collected.
"""
if self.pool:
self.pool.close()
self.pool = None
def pipes(self):
"""
Initialize pipelines.
"""
# Pipeline definitions
self.pipelines = {}
# Default pipelines
pipelines = list(PipelineFactory.list().keys())
# Add custom pipelines
for key in self.config:
if "." in key:
pipelines.append(key)
# Move dependent pipelines to end of list
pipelines = sorted(pipelines, key=lambda x: x in ["similarity", "extractor"])
# Create pipelines
for pipeline in pipelines:
if pipeline in self.config:
config = self.config[pipeline] if self.config[pipeline] else {}
# Add application reference, if requested
if "application" in config:
config["application"] = self
# Custom pipeline parameters
if pipeline == "extractor" and "similarity" not in config:
# Add placeholder, will be set to embeddings index once initialized
config["similarity"] = None
# Resolve reference pipeline, if necessary
if config.get("path") in self.pipelines:
config["path"] = self.pipelines[config["path"]]
elif pipeline == "similarity" and "path" not in config and "labels" in self.pipelines:
config["model"] = self.pipelines["labels"]
self.pipelines[pipeline] = PipelineFactory.create(config, pipeline)
def flows(self):
"""
Initialize workflows.
"""
# Workflow definitions
self.workflows = {}
# Create workflows
if "workflow" in self.config:
for workflow, config in self.config["workflow"].items():
# Create copy of config
config = config.copy()
# Resolve callable functions
config["tasks"] = [self.resolve(task) for task in config["tasks"]]
# Resolve stream functions
if "stream" in config:
config["stream"] = self.resolve(config["stream"])
# Get scheduler config
schedule = config.pop("schedule", None)
# Create workflow
self.workflows[workflow] = WorkflowFactory.create(config, workflow)
# Schedule job if necessary
if schedule:
# Create pool if necessary
if not self.pool:
self.pool = ThreadPool()
self.pool.apply_async(self.workflows[workflow].schedule, kwds=schedule)
def indexes(self, loaddata):
"""
Initialize an embeddings index.
Args:
loaddata: If True (default), load existing index data, if available. Otherwise, only load models.
"""
# Resolve functions in embeddings config
config = self.config.get("embeddings") if self.config else None
if config:
# Create copy of config
config = config.copy()
if "functions" in config:
# Resolve callable functions
functions = []
for fn in config["functions"]:
original = fn
try:
if isinstance(fn, dict):
fn = fn.copy()
fn["function"] = self.function(fn["function"])
else:
fn = self.function(fn)
# pylint: disable=W0703
except Exception:
# Not a resolvable function, pipeline or workflow - further resolution will happen in embeddings
fn = original
functions.append(fn)
config["functions"] = functions
if "transform" in config:
# Resolve transform function
config["transform"] = self.function(config["transform"])
# Load embeddings index if loaddata and index exists
if loaddata and Embeddings().exists(self.config.get("path"), self.config.get("cloud")):
# Load existing index if available
self.embeddings = Embeddings()
self.embeddings.load(self.config.get("path"), self.config.get("cloud"))
elif "embeddings" in self.config:
# Initialize empty embeddings
self.embeddings = Embeddings(config)
# If an extractor pipeline is defined and the similarity attribute is None, set to embeddings index
extractor = self.pipelines.get("extractor")
if extractor and not extractor.similarity:
extractor.similarity = self.embeddings
def resolve(self, task):
"""
Resolves callable functions for a task.
Args:
task: input task config
"""
# Check for task shorthand syntax
task = {"action": task} if isinstance(task, (str, list)) else task
if "action" in task:
action = task["action"]
values = [action] if not isinstance(action, list) else action
actions = []
for a in values:
if a in ["index", "upsert"]:
# Add queue action to buffer documents to index
actions.append(self.add)
# Override and disable unpacking for indexing actions
task["unpack"] = False
# Add finalize to trigger indexing
task["finalize"] = self.upsert if a == "upsert" else self.index
elif a == "search":
actions.append(self.batchsearch)
elif a == "transform":
# Transform vectors
actions.append(self.batchtransform)
# Override and disable one-to-many transformations
task["onetomany"] = False
else:
# Resolve action to callable function
actions.append(self.function(a))
# Save resolved action(s)
task["action"] = actions[0] if not isinstance(action, list) else actions
# Resolve initializer
if "initialize" in task and isinstance(task["initialize"], str):
task["initialize"] = self.function(task["initialize"])
# Resolve finalizer
if "finalize" in task and isinstance(task["finalize"], str):
task["finalize"] = self.function(task["finalize"])
return task
def function(self, function):
"""
Get a handle to a callable function.
Args:
function: function name
Returns:
resolved function
"""
# Check if function is a pipeline
if function in self.pipelines:
return self.pipelines[function]
# Check if function is a workflow
if function in self.workflows:
return self.workflows[function]
# Attempt to resolve action as a callable function
return PipelineFactory.create({}, function)
def search(self, query, limit=10):
"""
Finds documents in the embeddings model most similar to the input query. Returns
a list of {id: value, score: value} sorted by highest score, where id is the
document id in the embeddings model.
Args:
query: query text
limit: maximum results, used if request is None
Returns:
list of {id: value, score: value}
"""
if self.embeddings:
with self.lock:
results = self.embeddings.search(query, limit)
# Unpack (id, score) tuple, if necessary. Otherwise, results are dictionaries.
return [{"id": r[0], "score": float(r[1])} if isinstance(r, tuple) else r for r in results]
return None
def batchsearch(self, queries, limit=10):
"""
Finds documents in the embeddings model most similar to the input queries. Returns
a list of {id: value, score: value} sorted by highest score per query, where id is
the document id in the embeddings model.
Args:
queries: queries text
limit: maximum results
Returns:
list of {id: value, score: value} per query
"""
if self.embeddings:
with self.lock:
search = self.embeddings.batchsearch(queries, limit)
results = []
for result in search:
# Unpack (id, score) tuple, if necessary. Otherwise, results are dictionaries.
results.append([{"id": r[0], "score": float(r[1])} if isinstance(r, tuple) else r for r in result])
return results
return None
def add(self, documents):
"""
Adds a batch of documents for indexing.
Args:
documents: list of {id: value, data: value, tags: value}
Returns:
unmodified input documents
"""
# Raise error if index is not writable
if not self.config.get("writable"):
raise ReadOnlyError("Attempting to add documents to a read-only index (writable != True)")
if self.embeddings:
with self.lock:
# Create documents file if not already open
if not self.documents:
self.documents = Documents()
# Add documents
self.documents.add(list(documents))
# Return unmodified input documents
return documents
def index(self):
"""
Builds an embeddings index for previously batched documents.
"""
# Raise error if index is not writable
if not self.config.get("writable"):
raise ReadOnlyError("Attempting to index a read-only index (writable != True)")
if self.embeddings and self.documents:
with self.lock:
# Build scoring index if term weighting is enabled
if self.embeddings.isweighted():
self.embeddings.score(self.documents)
# Build embeddings index
self.embeddings.index(self.documents)
                # Save index if path available, otherwise this is a memory-only index
if self.config.get("path"):
self.embeddings.save(self.config["path"], self.config.get("cloud"))
# Reset document stream
self.documents.close()
self.documents = None
def upsert(self):
"""
Runs an embeddings upsert operation for previously batched documents.
"""
# Raise error if index is not writable
if not self.config.get("writable"):
raise ReadOnlyError("Attempting to upsert a read-only index (writable != True)")
if self.embeddings and self.documents:
with self.lock:
# Run upsert
self.embeddings.upsert(self.documents)
                # Save index if path available, otherwise this is a memory-only index
if self.config.get("path"):
self.embeddings.save(self.config["path"], self.config.get("cloud"))
# Reset document stream
self.documents.close()
self.documents = None
def delete(self, ids):
"""
Deletes from an embeddings index. Returns list of ids deleted.
Args:
ids: list of ids to delete
Returns:
ids deleted
"""
# Raise error if index is not writable
if not self.config.get("writable"):
raise ReadOnlyError("Attempting to delete from a read-only index (writable != True)")
if self.embeddings:
with self.lock:
return self.embeddings.delete(ids)
return None
def count(self):
"""
Total number of elements in this embeddings index.
Returns:
number of elements in embeddings index
"""
if self.embeddings:
return self.embeddings.count()
return None
def similarity(self, query, texts):
"""
Computes the similarity between query and list of text. Returns a list of
{id: value, score: value} sorted by highest score, where id is the index
in texts.
Args:
query: query text
texts: list of text
Returns:
list of {id: value, score: value}
"""
# Use similarity instance if available otherwise fall back to embeddings model
if "similarity" in self.pipelines:
return [{"id": uid, "score": float(score)} for uid, score in self.pipelines["similarity"](query, texts)]
if self.embeddings:
return [{"id": uid, "score": float(score)} for uid, score in self.embeddings.similarity(query, texts)]
return None
def batchsimilarity(self, queries, texts):
"""
Computes the similarity between list of queries and list of text. Returns a list
of {id: value, score: value} sorted by highest score per query, where id is the
index in texts.
Args:
queries: queries text
texts: list of text
Returns:
list of {id: value, score: value} per query
"""
# Use similarity instance if available otherwise fall back to embeddings model
if "similarity" in self.pipelines:
return [[{"id": uid, "score": float(score)} for uid, score in r] for r in self.pipelines["similarity"](queries, texts)]
if self.embeddings:
return [[{"id": uid, "score": float(score)} for uid, score in r] for r in self.embeddings.batchsimilarity(queries, texts)]
return None
def explain(self, query, texts=None, limit=10):
"""
Explains the importance of each input token in text for a query.
Args:
query: query text
texts: optional list of text, otherwise runs search query
limit: optional limit if texts is None
Returns:
            list of dict per input text where higher token scores represent higher importance relative to the query
"""
if self.embeddings:
with self.lock:
return self.embeddings.explain(query, texts, limit)
return None
def batchexplain(self, queries, texts=None, limit=10):
"""
Explains the importance of each input token in text for a list of queries.
Args:
            queries: queries text
texts: optional list of text, otherwise runs search queries
limit: optional limit if texts is None
Returns:
            list of dict per input text per query where higher token scores represent higher importance relative to the query
"""
if self.embeddings:
with self.lock:
return self.embeddings.batchexplain(queries, texts, limit)
return None
def transform(self, text):
"""
Transforms text into embeddings arrays.
Args:
text: input text
Returns:
embeddings array
"""
if self.embeddings:
return [float(x) for x in self.embeddings.transform((None, text, None))]
return None
def batchtransform(self, texts):
"""
Transforms list of text into embeddings arrays.
Args:
texts: list of text
Returns:
embeddings arrays
"""
if self.embeddings:
documents = [(None, text, None) for text in texts]
return [[float(x) for x in result] for result in self.embeddings.batchtransform(documents)]
return None
def extract(self, queue, texts=None):
"""
Extracts answers to input questions.
Args:
queue: list of {name: value, query: value, question: value, snippet: value}
texts: optional list of text
Returns:
list of {name: value, answer: value}
"""
if self.embeddings and "extractor" in self.pipelines:
# Get extractor instance
extractor = self.pipelines["extractor"]
# Run extractor and return results as dicts
return extractor(queue, texts)
return None
def label(self, text, labels):
"""
Applies a zero shot classifier to text using a list of labels. Returns a list of
{id: value, score: value} sorted by highest score, where id is the index in labels.
Args:
text: text|list
labels: list of labels
Returns:
list of {id: value, score: value} per text element
"""
if "labels" in self.pipelines:
# Text is a string
if isinstance(text, str):
return [{"id": uid, "score": float(score)} for uid, score in self.pipelines["labels"](text, labels)]
# Text is a list
return [[{"id": uid, "score": float(score)} for uid, score in result] for result in self.pipelines["labels"](text, labels)]
return None
def pipeline(self, name, args):
"""
Generic pipeline execution method.
Args:
name: pipeline name
args: pipeline arguments
Returns:
pipeline results
"""
if name in self.pipelines:
return self.pipelines[name](*args)
return None
def workflow(self, name, elements):
"""
Executes a workflow.
Args:
name: workflow name
elements: elements to process
Returns:
processed elements
"""
if hasattr(elements, "__len__") and hasattr(elements, "__getitem__"):
# Convert to tuples and return as a list since input is sized
elements = [tuple(element) if isinstance(element, list) else element for element in elements]
else:
# Convert to tuples and return as a generator since input is not sized
elements = (tuple(element) if isinstance(element, list) else element for element in elements)
# Execute workflow
return self.workflows[name](elements)
def wait(self):
"""
Closes threadpool and waits for completion.
"""
if self.pool:
self.pool.close()
self.pool.join()
self.pool = None
class ReadOnlyError(Exception):
"""
Error raised when trying to modify a read-only index
"""
|
e5ce92e76156dafa7abef859b2a208715426d355
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/~inactive/r_trashtaste/app.py
|
88abaabcbbfbb808ce112eb469926b505025c697
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262
| 2023-07-30T18:36:19
| 2023-07-30T18:36:19
| 67,726,018
| 258
| 205
|
MIT
| 2023-09-07T02:36:36
| 2016-09-08T17:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 143
|
py
|
app.py
|
#encoding:utf-8
subreddit = 'TrashTaste'
t_channel = '@r_TrashTaste'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
bf2b0956df428ce23806821a18ad8415067155d6
|
05169e203974411667ab947298a74575b8a179e0
|
/packages/jet_bridge_base/jet_bridge_base/filters/wkt_filter.py
|
df20b90abd78f907d329b1705622aab961eaf62c
|
[
"MIT"
] |
permissive
|
jet-admin/jet-bridge
|
f6b563e1801985063483ddb02e9e1c3301dc0612
|
c53d30fb308eed5822083eaf71f641c4098610cc
|
refs/heads/master
| 2023-09-01T14:31:42.261427
| 2023-08-24T13:54:34
| 2023-08-24T13:54:34
| 163,167,532
| 1,564
| 166
|
MIT
| 2023-03-18T03:20:04
| 2018-12-26T10:27:33
|
Python
|
UTF-8
|
Python
| false
| false
| 148
|
py
|
wkt_filter.py
|
from jet_bridge_base.fields import WKTField
from jet_bridge_base.filters.filter import Filter
class WKTFilter(Filter):
field_class = WKTField
|
8f5d9743cf9c92bb18c2f7b673c1df076b0eee51
|
cb6fda62ebf3c778680ee624fc3868442842cbff
|
/mmdet3d/utils/warmup_fp16_optimizer.py
|
308692e371996aca092f030b93938466f1886227
|
[
"Apache-2.0"
] |
permissive
|
Divadi/SOLOFusion
|
22503000079495aff9785177e32a1bd8d55feebf
|
683edce81b619098d1ba143d7b15b1e6aa23337a
|
refs/heads/main
| 2023-05-23T08:29:19.284487
| 2023-03-15T05:20:21
| 2023-03-15T05:20:21
| 546,174,873
| 188
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,063
|
py
|
warmup_fp16_optimizer.py
|
import copy
import logging
from collections import defaultdict
from itertools import chain
from typing import Optional, Union
import torch.nn as nn
from torch import Tensor
from torch.nn.utils import clip_grad
from torch.cuda.amp import GradScaler
from mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version
from mmcv.runner.dist_utils import allreduce_grads
from mmcv.runner.fp16_utils import LossScaler, wrap_fp16_model
from mmcv.runner.hooks.hook import HOOKS, Hook
from mmcv.runner.hooks import OptimizerHook
@HOOKS.register_module()
class WarmupFp16OptimizerHook(OptimizerHook):
"""FP16 optimizer hook (using PyTorch's implementation).
If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend,
to take care of the optimization procedure.
Args:
loss_scale (float | str | dict): Scale factor configuration.
If loss_scale is a float, static loss scaling will be used with
the specified scale. If loss_scale is a string, it must be
'dynamic', then dynamic loss scaling will be used.
It can also be a dict containing arguments of GradScalar.
Defaults to 512. For Pytorch >= 1.6, mmcv uses official
implementation of GradScaler. If you use a dict version of
loss_scale to create GradScaler, please refer to:
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
for the parameters.
Examples:
>>> loss_scale = dict(
... init_scale=65536.0,
... growth_factor=2.0,
... backoff_factor=0.5,
... growth_interval=2000
... )
>>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale)
"""
def __init__(self,
grad_clip: Optional[dict] = None,
coalesce: bool = True,
bucket_size_mb: int = -1,
warmup_loss_scale_value: float = 1.,
warmup_loss_scale_iters: int = 100,
loss_scale: Union[float, str, dict] = 512.,
distributed: bool = True):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.warmup_loss_scale_value = warmup_loss_scale_value
self.warmup_loss_scale_iters = warmup_loss_scale_iters
self.distributed = distributed
self._scale_update_param = None
if loss_scale == 'dynamic':
self.loss_scaler = GradScaler()
elif isinstance(loss_scale, float):
self._scale_update_param = loss_scale
self.loss_scaler = GradScaler(init_scale=loss_scale)
elif isinstance(loss_scale, dict):
self.loss_scaler = GradScaler(**loss_scale)
else:
raise ValueError('loss_scale must be of type float, dict, or '
f'"dynamic", got {loss_scale}')
self.post_warmup_scale = self.loss_scaler.get_scale()
def before_run(self, runner) -> None:
"""Preparing steps before Mixed Precision Training."""
# wrap model mode to fp16
wrap_fp16_model(runner.model)
# resume from state dict
if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']:
scaler_state_dict = runner.meta['fp16']['loss_scaler']
self.loss_scaler.load_state_dict(scaler_state_dict)
def copy_grads_to_fp32(self, fp16_net: nn.Module,
fp32_weights: Tensor) -> None:
"""Copy gradients from fp16 model to fp32 weight copy."""
for fp32_param, fp16_param in zip(fp32_weights,
fp16_net.parameters()):
if fp16_param.grad is not None:
if fp32_param.grad is None:
fp32_param.grad = fp32_param.data.new(
fp32_param.size())
fp32_param.grad.copy_(fp16_param.grad)
def copy_params_to_fp16(self, fp16_net: nn.Module,
fp32_weights: Tensor) -> None:
"""Copy updated params from fp32 weight copy to fp16 model."""
for fp16_param, fp32_param in zip(fp16_net.parameters(),
fp32_weights):
fp16_param.data.copy_(fp32_param.data)
def after_train_iter(self, runner) -> None:
"""Backward optimization steps for Mixed Precision Training. For
dynamic loss scaling, please refer to
https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler.
1. Scale the loss by a scale factor.
2. Backward the loss to obtain the gradients.
3. Unscale the optimizer’s gradient tensors.
4. Call optimizer.step() and update scale factor.
5. Save loss_scaler state_dict for resume purpose.
"""
# clear grads of last iteration
runner.model.zero_grad()
runner.optimizer.zero_grad()
self.loss_scaler.scale(runner.outputs['loss']).backward()
self.loss_scaler.unscale_(runner.optimizer)
# grad clip
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
# backward and update scaler
self.loss_scaler.step(runner.optimizer)
if runner._iter < self.warmup_loss_scale_iters:
self.loss_scaler.update(self.warmup_loss_scale_value)
elif runner._iter == self.warmup_loss_scale_iters:
runner.logger.info("Ending FP16 Warmup, setting scale to {}".format(self.post_warmup_scale))
self.loss_scaler.update(self.post_warmup_scale)
else:
self.loss_scaler.update(self._scale_update_param)
# save state_dict of loss_scaler
runner.meta.setdefault(
'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
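# A minimal config sketch (values are illustrative): because the hook is
# registered in mmcv's HOOKS registry, it can typically be selected from a
# config dict in place of the standard Fp16OptimizerHook, e.g.
#
#     optimizer_config = dict(
#         type='WarmupFp16OptimizerHook',
#         loss_scale='dynamic',
#         warmup_loss_scale_value=1.0,
#         warmup_loss_scale_iters=100,
#         grad_clip=dict(max_norm=35, norm_type=2))
#
# For the first warmup_loss_scale_iters iterations the scale is pinned to
# warmup_loss_scale_value; at that boundary it is restored to the GradScaler's
# initial scale and then updated normally.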
|
e3fca19a02b0cf9a050e99d9523bd548bbc05162
|
a1d0efb4f7392d4265962faaa81d19be58c8811b
|
/fmriprep/utils/meepi.py
|
e73ca4fb242424ddbd998fbdf37d43e02cc374a8
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
nipreps/fmriprep
|
fef757d29748b611321c43c96385937fa9f44cdd
|
9bde234d90dd9fc7f0b35af5b091a27b7238c3f4
|
refs/heads/master
| 2023-09-03T15:08:22.467939
| 2023-08-27T17:44:43
| 2023-08-27T17:45:06
| 53,175,327
| 215
| 99
|
Apache-2.0
| 2023-09-07T03:24:26
| 2016-03-05T00:56:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
meepi.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Multi-echo EPI utilities."""
def combine_meepi_source(in_files):
"""
Create a new source name when optimally
combining multiple multi-echo EPIs
>>> combine_meepi_source([
... 'sub-01_run-01_echo-1_bold.nii.gz',
... 'sub-01_run-01_echo-2_bold.nii.gz',
... 'sub-01_run-01_echo-3_bold.nii.gz',])
'sub-01_run-01_bold.nii.gz'
"""
import os
from nipype.utils.filemanip import filename_to_list
base, in_file = os.path.split(filename_to_list(in_files)[0])
entities = [ent for ent in in_file.split('_') if not ent.startswith('echo-')]
basename = '_'.join(entities)
return os.path.join(base, basename)
|
cbc50de96294db9c699f5c5016ed2f208f97a247
|
cba7444a9b6c7e3f9b557ff81c5ab03a2c8c6d8e
|
/tests/lib/utils.py
|
c8fb9b2304d5063e2c31e27f7cf1c3a1422803c4
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/dftimewolf
|
e364e0eb213b6a8bb3648598c62fd622cc509755
|
bcea85b1ce7a0feb2aa28b5be4fc6ae124e8ca3c
|
refs/heads/main
| 2023-08-24T09:49:25.971929
| 2023-08-17T12:30:07
| 2023-08-17T12:30:07
| 64,484,320
| 248
| 81
|
Apache-2.0
| 2023-09-13T07:05:50
| 2016-07-29T13:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the utils module."""
from __future__ import unicode_literals
import os
import shutil
import tarfile
import tempfile
import unittest
import mock
import pandas as pd
from dftimewolf.lib import utils
class UtilsTest(unittest.TestCase):
"""Tests for the utils module."""
def setUp(self):
"""Test setup."""
self.tmp_input_dir = tempfile.mkdtemp(prefix='dftimewolf-input')
self.tmp_output_dir = tempfile.mkdtemp(prefix='dftimewolf-output')
def tearDown(self):
"""Tears Down class."""
if 'dftimewolf-input' in self.tmp_input_dir:
shutil.rmtree(self.tmp_input_dir)
if 'dftimewolf-output' in self.tmp_output_dir:
shutil.rmtree(self.tmp_output_dir)
@mock.patch('tempfile.mkdtemp')
def testCompress(self, mock_mkdtemp):
"""Tests the utils.Compress() method."""
test_data = 'SampleInput'
test_name = 'test_file.txt'
mock_mkdtemp.return_value = self.tmp_output_dir
test_file = os.path.join(self.tmp_input_dir, test_name)
with open(test_file, 'w') as test_file_fh:
test_file_fh.write(test_data)
output_file = utils.Compress(self.tmp_input_dir)
self.assertTrue(os.path.exists(output_file))
self.assertTrue(tarfile.is_tarfile(output_file))
tar = tarfile.TarFile.open(output_file)
member_name = tar.getmembers()[1].name
self.assertIn(test_name, member_name)
member_data = (
tar.extractfile(member_name).read()) # pytype: disable=attribute-error
self.assertEqual(member_data, test_data.encode('utf-8'))
def testWriteDataFrameToJsonl(self):
"""Tests the utils.WriteDataFrameToJsonl() method."""
sample_df = pd.DataFrame([1], [0], ['foo'])
expected_jsonl = '{"foo":1}\n'
filename = utils.WriteDataFrameToJsonl(sample_df)
with open(filename) as f:
contents = ''.join(f.readlines())
self.assertEqual(contents, expected_jsonl)
|
cef0bbb1a02c1803e6c6ab2e54769a2009e09723
|
316b99c6046ff58c8499e0c214e9b81d9c3132b0
|
/beartype_test/a00_unit/a20_util/hint/a00_pep/test_a00_utilpepget.py
|
61eb0d0725a41bbc561741220c67dd4e2f591b90
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
beartype/beartype
|
fb6417b3dc2e08c065f0d907f43411c33d883a7d
|
0cfd53391eb4de2f8297a4632aa5895b8d82a5b7
|
refs/heads/main
| 2023-08-15T13:17:47.095732
| 2023-08-15T05:25:54
| 2023-08-15T05:25:54
| 252,646,465
| 1,992
| 51
|
MIT
| 2023-07-28T04:13:08
| 2020-04-03T06:06:22
|
Python
|
UTF-8
|
Python
| false
| false
| 8,728
|
py
|
test_a00_utilpepget.py
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2023 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **PEP-compliant type hint getter** unit tests.
This submodule unit tests the public API of the private
:mod:`beartype._util.hint.pep.utilpepget` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# from beartype_test._util.mark.pytskip import skip_if_python_version_less_than
# ....................{ TESTS ~ attr }....................
def test_get_hint_pep_args() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpepget.get_hint_pep_args`
getter.
'''
# ....................{ IMPORTS }....................
# Defer test-specific imports.
from beartype._util.hint.pep.utilpepget import (
_HINT_ARGS_EMPTY_TUPLE,
get_hint_pep_args,
)
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_9
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.pep.data_pep import HINTS_PEP_META
from typing import Tuple
# ....................{ PASS }....................
# For each PEP-compliant hint, assert this getter returns...
for hint_pep_meta in HINTS_PEP_META:
# Tuple of all arguments subscripting this hint.
hint_args = get_hint_pep_args(hint_pep_meta.hint)
assert isinstance(hint_args, tuple)
# For subscripted hints, one or more arguments.
if hint_pep_meta.is_args:
assert hint_args
# For non-argumentative hints, *NO* arguments.
else:
assert hint_args == ()
# ....................{ PASS ~ pep }....................
#FIXME: Explicitly validate that this getter handles both PEP 484- and 585-
#compliant empty tuples by returning "_HINT_ARGS_EMPTY_TUPLE" as expected,
#please. This is sufficiently critical that we *NEED* to ensure this.
# Assert that this getter when passed a PEP 484-compliant empty tuple type
# hint returns a tuple containing an empty tuple for disambiguity.
assert get_hint_pep_args(Tuple[()]) == _HINT_ARGS_EMPTY_TUPLE
# If Python >= 3.9, the active Python interpreter supports PEP 585. In this
# case, assert that this getter when passed a PEP 585-compliant empty tuple
# type hint returns a tuple containing an empty tuple for disambiguity.
if IS_PYTHON_AT_LEAST_3_9:
assert get_hint_pep_args(tuple[()]) == _HINT_ARGS_EMPTY_TUPLE
# ....................{ FAIL }....................
# Assert this getter returns *NO* type variables for non-"typing" hints.
for not_hint_pep in NOT_HINTS_PEP:
assert get_hint_pep_args(not_hint_pep) == ()
def test_get_hint_pep_typevars() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpepget.get_hint_pep_typevars`
getter.
'''
# Defer test-specific imports.
from beartype._data.hint.pep.sign.datapepsigns import HintSignTypeVar
from beartype._util.hint.pep.utilpepget import (
get_hint_pep_typevars,
get_hint_pep_sign_or_none,
)
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.pep.data_pep import HINTS_PEP_META
# For each PEP-compliant hint, assert this getter returns...
for hint_pep_meta in HINTS_PEP_META:
# Tuple of all type variables subscripting this hint.
hint_typevars = get_hint_pep_typevars(hint_pep_meta.hint)
assert isinstance(hint_typevars, tuple)
# For typevared hints, one or more type variables.
if hint_pep_meta.is_typevars:
assert hint_typevars
for hint_typevar in hint_typevars:
assert get_hint_pep_sign_or_none(hint_typevar) is (
HintSignTypeVar)
# For non-typevared hints, *NO* type variables.
else:
assert hint_typevars == ()
# Assert this getter returns *NO* type variables for non-"typing" hints.
for not_hint_pep in NOT_HINTS_PEP:
assert get_hint_pep_typevars(not_hint_pep) == ()
# ....................{ TESTS ~ sign }....................
def test_get_hint_pep_sign() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpepget.get_hint_pep_sign` getter.
'''
# Defer test-specific imports.
from beartype.roar import BeartypeDecorHintPepSignException
from beartype._util.hint.pep.utilpepget import get_hint_pep_sign
from beartype_test.a00_unit.data.hint.data_hint import (
HINTS_NONPEP, NonpepCustomFakeTyping)
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
from pytest import raises
# Assert this getter returns the expected unsubscripted "typing" attribute
# for all PEP-compliant type hints associated with such an attribute.
for hint_pep_meta in HINTS_PEP_META:
assert get_hint_pep_sign(hint_pep_meta.hint) is hint_pep_meta.pep_sign
# Assert this getter raises the expected exception for an instance of a
# class erroneously masquerading as a "typing" class.
with raises(BeartypeDecorHintPepSignException):
# Localize this return value to simplify debugging.
hint_nonpep_sign = get_hint_pep_sign(NonpepCustomFakeTyping())
# Assert this getter raises the expected exception for non-"typing" hints.
for hint_nonpep in HINTS_NONPEP:
with raises(BeartypeDecorHintPepSignException):
# Localize this return value to simplify debugging.
hint_nonpep_sign = get_hint_pep_sign(hint_nonpep)
# ....................{ TESTS ~ origin : type }....................
def test_get_hint_pep_type_isinstanceable() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpepget.get_hint_pep_origin_type_isinstanceable`
getter.
'''
# Defer test-specific imports.
from beartype.roar import BeartypeDecorHintPepException
from beartype._util.hint.pep.utilpepget import (
get_hint_pep_origin_type_isinstanceable)
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
from pytest import raises
# Assert this getter...
for hint_pep_meta in HINTS_PEP_META:
# Returns the expected type origin for all PEP-compliant type hints
# originating from an origin type.
if hint_pep_meta.isinstanceable_type is not None:
assert get_hint_pep_origin_type_isinstanceable(hint_pep_meta.hint) is (
hint_pep_meta.isinstanceable_type)
# Raises the expected exception for all other hints.
else:
with raises(BeartypeDecorHintPepException):
get_hint_pep_origin_type_isinstanceable(hint_pep_meta.hint)
# Assert this getter raises the expected exception for non-PEP-compliant
# type hints.
for not_hint_pep in NOT_HINTS_PEP:
with raises(BeartypeDecorHintPepException):
get_hint_pep_origin_type_isinstanceable(not_hint_pep)
def test_get_hint_pep_type_isinstanceable_or_none() -> None:
'''
Test the
:func:`beartype._util.hint.pep.utilpepget.get_hint_pep_origin_type_isinstanceable_or_none`
getter.
'''
# Defer test-specific imports.
from beartype.roar import BeartypeDecorHintPepException
from beartype._util.hint.pep.utilpepget import (
get_hint_pep_origin_type_isinstanceable_or_none)
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS_PEP
from beartype_test.a00_unit.data.hint.pep.data_pep import (
HINTS_PEP_META)
from pytest import raises
# Assert this getter returns the expected type origin for all PEP-compliant
# type hints.
for hint_pep_meta in HINTS_PEP_META:
assert get_hint_pep_origin_type_isinstanceable_or_none(hint_pep_meta.hint) is (
hint_pep_meta.isinstanceable_type)
# Assert this getter raises the expected exception for non-PEP-compliant
# type hints.
for not_hint_pep in NOT_HINTS_PEP:
with raises(BeartypeDecorHintPepException):
get_hint_pep_origin_type_isinstanceable_or_none(not_hint_pep)
|
939a0f86d92cf51402e41b2f6fbdb727c4006c8f
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/_export/db/examples/class_method.py
|
77c629559d21eb6390c00ce8143d773d16f5710f
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 509
|
py
|
class_method.py
|
import torch
from torch._export.db.case import export_case
@export_case(
example_inputs=(torch.ones(3, 4),),
)
class ClassMethod(torch.nn.Module):
"""
Class methods are inlined during tracing.
"""
@classmethod
def method(cls, x):
return x + 1
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 2)
def forward(self, x):
x = self.linear(x)
return self.method(x) * self.__class__.method(x) * type(self).method(x)
|
2790c5fea2cbbc3f25ac70fa71174f3aeae58918
|
ecaba173879f92f24e3c951866fda23c0a4fc426
|
/tests/providers/aws/s3_test.py
|
7aae7a84dd8432d6b60cbf0eb9044e3cac7cfe6d
|
[
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
GoogleCloudPlatform/PerfKitBenchmarker
|
2f4917fd796db4eb90822c557d8fa08a497fbd48
|
d0699f32998898757b036704fba39e5471641f01
|
refs/heads/master
| 2023-09-02T08:14:54.110308
| 2023-09-01T20:28:01
| 2023-09-01T20:28:38
| 21,950,910
| 1,923
| 567
|
Apache-2.0
| 2023-09-13T22:37:42
| 2014-07-17T17:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
s3_test.py
|
"""Tests for the AWS S3 service."""
import unittest
import mock
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import s3
from tests import pkb_common_test_case
class S3Test(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(S3Test, self).setUp()
flag_values = {
'timeout_minutes': 0,
'persistent_timeout_minutes': 0}
p = mock.patch.object(s3, 'FLAGS')
flags_mock = p.start()
flags_mock.configure_mock(**flag_values)
self.mock_command = mock.patch.object(vm_util, 'IssueCommand').start()
self.mock_retryable_command = mock.patch.object(
vm_util, 'IssueRetryableCommand').start()
self.s3_service = s3.S3Service()
self.s3_service.PrepareService(None) # will use s3.DEFAULT_AWS_REGION
def tearDown(self):
super(S3Test, self).tearDown()
mock.patch.stopall()
def test_make_bucket(self):
self.mock_command.return_value = (None, None, None)
self.s3_service.MakeBucket(bucket_name='test_bucket')
self.mock_command.assert_called_once_with([
'aws', 's3', 'mb', 's3://test_bucket',
'--region={}'.format(s3.DEFAULT_AWS_REGION)], raise_on_failure=False)
self.mock_retryable_command.assert_called_once_with([
'aws', 's3api', 'put-bucket-tagging', '--bucket', 'test_bucket',
'--tagging', 'TagSet=[]', '--region={}'.format(s3.DEFAULT_AWS_REGION)])
if __name__ == '__main__':
unittest.main()
|
c0a1d80850fac807b15778413f0fef47501b2055
|
e4af1d5cdf75507db0a2036fe3d3f78e4c88dcfe
|
/tests/conftest.py
|
57101c66f20bb6e6a8d157b9ad8d1c069f434015
|
[
"MIT"
] |
permissive
|
mvantellingen/localshop
|
596073ecdc569dd9c045d0420df23f06aefe196f
|
875ae6d056282bb9d33c07ab69d7bae8e02d5d66
|
refs/heads/master
| 2023-08-31T15:28:08.623494
| 2022-07-17T19:39:21
| 2022-07-17T19:39:21
| 3,359,149
| 204
| 73
|
MIT
| 2023-04-21T20:53:22
| 2012-02-05T12:03:50
|
Less
|
UTF-8
|
Python
| false
| false
| 2,407
|
py
|
conftest.py
|
import os
import re
import pytest
import requests_mock
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.storage.fallback import FallbackStorage
from django.contrib.sessions.backends.db import SessionStore
from django.test.client import RequestFactory as BaseRequestFactory
from django.test.utils import override_settings
from localshop.apps.packages.pypi import get_search_names
from tests.factories import CIDRFactory, RepositoryFactory
def pytest_configure(config):
override = override_settings(
ALLOWED_HOSTS=['*'],
STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage',
CELERY_TASK_ALWAYS_EAGER=True,
)
override.enable()
@pytest.fixture(scope='function')
def pypi_stub():
with requests_mock.Mocker(real_http=True) as rm:
        wildcard_re = re.compile(r'^https://pypi\.internal/.*')
rm.register_uri('GET', wildcard_re, status_code=404)
pypi_dir = os.path.join(os.path.dirname(__file__), 'pypi_data')
for filename in os.listdir(pypi_dir):
with open(os.path.join(pypi_dir, filename), 'rb') as fh:
content = fh.read()
name, ext = os.path.splitext(filename)
url = 'https://pypi.internal/pypi/%s/json' % name
rm.register_uri('GET', url, content=content)
# Register the alternative urls and redirect to original url
for alt_name in get_search_names(name):
if alt_name != name:
alt_url = 'https://pypi.internal/pypi/%s/json' % alt_name
rm.register_uri(
'GET',
alt_url,
headers={
'Location': url,
},
status_code=301)
yield 'https://pypi.internal/pypi/'
@pytest.fixture(scope='function')
@pytest.mark.django_db
def repository(db):
repo = RepositoryFactory()
CIDRFactory(repository=repo)
return repo
class RequestFactory(BaseRequestFactory):
def request(self, user=None, **request):
request = super(RequestFactory, self).request(**request)
request.user = AnonymousUser()
request.session = SessionStore()
request._messages = FallbackStorage(request)
return request
@pytest.fixture()
def rf():
return RequestFactory()
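# A minimal usage sketch (the view and URL are illustrative): the rf fixture
# returns a RequestFactory whose requests already carry an AnonymousUser, a
# session and message storage, so views can be invoked directly in tests:
#
#     def test_some_view(rf):
#         request = rf.get('/simple/')
#         response = some_view(request)
#         assert response.status_code == 200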
|
ce4b9e38a3b6ff9768ed538a7b2b253de4fffae0
|
5bd1490ada452d262819b51d240b519b7264dbd8
|
/Chapter 9/ch9_4.py
|
a2df4d2ab80dc700072652b40aeb15d6d82d40d3
|
[] |
no_license
|
PacktPublishing/Mastering-Natural-Language-Processing-with-Python
|
59feee3a1ac0751f97256af328c6957adaeb7111
|
61fb2091f8c2d42fa5f14cb02664b0f2ca9127a1
|
refs/heads/master
| 2022-11-05T20:29:52.245545
| 2022-10-28T07:52:43
| 2022-10-28T07:52:43
| 60,772,409
| 142
| 124
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
ch9_4.py
|
import nltk
expr_read = nltk.sem.DrtExpression.fromstring
expr4 = expr_read('([],[(([x],[student(x)])->([y],[book(y),read(x,y)]))])')
print(expr4.fol())
|
8cf44fc4495cc7539954c628b9194b3ac3cc547f
|
01760737a6d1afb2bd303c6dda282f327ec60d27
|
/tornado/wsgi.py
|
32641be30ff7a814da06a2cf09c0af88976eb999
|
[
"Apache-2.0"
] |
permissive
|
tornadoweb/tornado
|
1ef2c7c6ce38523b2ea7d16bdd02c27407734510
|
a48d63446afdb384fa0f53ac707dea9d250dc183
|
refs/heads/master
| 2023-09-03T22:24:48.246702
| 2023-09-02T14:03:40
| 2023-09-02T14:03:40
| 301,742
| 16,150
| 5,335
|
Apache-2.0
| 2023-09-02T14:18:29
| 2009-09-09T04:55:16
|
Python
|
UTF-8
|
Python
| false
| false
| 10,817
|
py
|
wsgi.py
|
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI support for the Tornado web framework.
WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers.
This module provides WSGI support via the `WSGIContainer` class, which
makes it possible to run applications using other WSGI frameworks on
the Tornado HTTP server. The reverse is not supported; the Tornado
`.Application` and `.RequestHandler` classes are designed for use with
the Tornado `.HTTPServer` and cannot be used in a generic WSGI
container.
"""
import concurrent.futures
from io import BytesIO
import tornado
import sys
from tornado.concurrent import dummy_executor
from tornado import escape
from tornado import httputil
from tornado.ioloop import IOLoop
from tornado.log import access_log
from typing import List, Tuple, Optional, Callable, Any, Dict, Text
from types import TracebackType
import typing
if typing.TYPE_CHECKING:
from typing import Type # noqa: F401
from _typeshed.wsgi import WSGIApplication as WSGIAppType # noqa: F401
# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
# that are smuggled inside objects of type unicode (via the latin1 encoding).
# This function is like those in the tornado.escape module, but defined
# here to minimize the temptation to use it in non-wsgi contexts.
def to_wsgi_str(s: bytes) -> str:
assert isinstance(s, bytes)
return s.decode("latin1")
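# Illustrative note (not part of the original module): PEP 3333 native strings
# round-trip through latin1, so the raw bytes are always recoverable, e.g.
#   to_wsgi_str(b"/caf\xc3\xa9") == b"/caf\xc3\xa9".decode("latin1")
#   to_wsgi_str(b"/caf\xc3\xa9").encode("latin1") == b"/caf\xc3\xa9"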
class WSGIContainer(object):
r"""Makes a WSGI-compatible application runnable on Tornado's HTTP server.
.. warning::
WSGI is a *synchronous* interface, while Tornado's concurrency model
is based on single-threaded *asynchronous* execution. Many of Tornado's
distinguishing features are not available in WSGI mode, including efficient
long-polling and websockets. The primary purpose of `WSGIContainer` is
to support both WSGI applications and native Tornado ``RequestHandlers`` in
a single process. WSGI-only applications are likely to be better off
with a dedicated WSGI server such as ``gunicorn`` or ``uwsgi``.
Wrap a WSGI application in a `WSGIContainer` to make it implement the Tornado
`.HTTPServer` ``request_callback`` interface. The `WSGIContainer` object can
then be passed to classes from the `tornado.routing` module,
`tornado.web.FallbackHandler`, or to `.HTTPServer` directly.
This class is intended to let other frameworks (Django, Flask, etc)
run on the Tornado HTTP server and I/O loop.
Realistic usage will be more complicated, but the simplest possible example uses a
hand-written WSGI application with `.HTTPServer`::
def simple_app(environ, start_response):
status = "200 OK"
response_headers = [("Content-type", "text/plain")]
start_response(status, response_headers)
return [b"Hello world!\n"]
async def main():
container = tornado.wsgi.WSGIContainer(simple_app)
http_server = tornado.httpserver.HTTPServer(container)
http_server.listen(8888)
await asyncio.Event().wait()
asyncio.run(main())
The recommended pattern is to use the `tornado.routing` module to set up routing
rules between your WSGI application and, typically, a `tornado.web.Application`.
Alternatively, `tornado.web.Application` can be used as the top-level router
and `tornado.web.FallbackHandler` can embed a `WSGIContainer` within it.
If the ``executor`` argument is provided, the WSGI application will be executed
on that executor. This must be an instance of `concurrent.futures.Executor`,
typically a ``ThreadPoolExecutor`` (``ProcessPoolExecutor`` is not supported).
If no ``executor`` is given, the application will run on the event loop thread in
Tornado 6.3; this will change to use an internal thread pool by default in
Tornado 7.0.
.. warning::
By default, the WSGI application is executed on the event loop's thread. This
limits the server to one request at a time (per process), making it less scalable
than most other WSGI servers. It is therefore highly recommended that you pass
a ``ThreadPoolExecutor`` when constructing the `WSGIContainer`, after verifying
that your application is thread-safe. The default will change to use a
``ThreadPoolExecutor`` in Tornado 7.0.
.. versionadded:: 6.3
The ``executor`` parameter.
.. deprecated:: 6.3
The default behavior of running the WSGI application on the event loop thread
is deprecated and will change in Tornado 7.0 to use a thread pool by default.
"""
def __init__(
self,
wsgi_application: "WSGIAppType",
executor: Optional[concurrent.futures.Executor] = None,
) -> None:
self.wsgi_application = wsgi_application
self.executor = dummy_executor if executor is None else executor
def __call__(self, request: httputil.HTTPServerRequest) -> None:
IOLoop.current().spawn_callback(self.handle_request, request)
async def handle_request(self, request: httputil.HTTPServerRequest) -> None:
data = {} # type: Dict[str, Any]
response = [] # type: List[bytes]
def start_response(
status: str,
headers: List[Tuple[str, str]],
exc_info: Optional[
Tuple[
"Optional[Type[BaseException]]",
Optional[BaseException],
Optional[TracebackType],
]
] = None,
) -> Callable[[bytes], Any]:
data["status"] = status
data["headers"] = headers
return response.append
loop = IOLoop.current()
app_response = await loop.run_in_executor(
self.executor,
self.wsgi_application,
self.environ(request),
start_response,
)
try:
app_response_iter = iter(app_response)
def next_chunk() -> Optional[bytes]:
try:
return next(app_response_iter)
except StopIteration:
# StopIteration is special and is not allowed to pass through
# coroutines normally.
return None
while True:
chunk = await loop.run_in_executor(self.executor, next_chunk)
if chunk is None:
break
response.append(chunk)
finally:
if hasattr(app_response, "close"):
app_response.close() # type: ignore
body = b"".join(response)
if not data:
raise Exception("WSGI app did not call start_response")
status_code_str, reason = data["status"].split(" ", 1)
status_code = int(status_code_str)
headers = data["headers"] # type: List[Tuple[str, str]]
header_set = set(k.lower() for (k, v) in headers)
body = escape.utf8(body)
if status_code != 304:
if "content-length" not in header_set:
headers.append(("Content-Length", str(len(body))))
if "content-type" not in header_set:
headers.append(("Content-Type", "text/html; charset=UTF-8"))
if "server" not in header_set:
headers.append(("Server", "TornadoServer/%s" % tornado.version))
start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
header_obj = httputil.HTTPHeaders()
for key, value in headers:
header_obj.add(key, value)
assert request.connection is not None
request.connection.write_headers(start_line, header_obj, chunk=body)
request.connection.finish()
self._log(status_code, request)
def environ(self, request: httputil.HTTPServerRequest) -> Dict[Text, Any]:
"""Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
.. versionchanged:: 6.3
No longer a static method.
"""
hostport = request.host.split(":")
if len(hostport) == 2:
host = hostport[0]
port = int(hostport[1])
else:
host = request.host
port = 443 if request.protocol == "https" else 80
environ = {
"REQUEST_METHOD": request.method,
"SCRIPT_NAME": "",
"PATH_INFO": to_wsgi_str(
escape.url_unescape(request.path, encoding=None, plus=False)
),
"QUERY_STRING": request.query,
"REMOTE_ADDR": request.remote_ip,
"SERVER_NAME": host,
"SERVER_PORT": str(port),
"SERVER_PROTOCOL": request.version,
"wsgi.version": (1, 0),
"wsgi.url_scheme": request.protocol,
"wsgi.input": BytesIO(escape.utf8(request.body)),
"wsgi.errors": sys.stderr,
"wsgi.multithread": self.executor is not dummy_executor,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
if "Content-Type" in request.headers:
environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
if "Content-Length" in request.headers:
environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
for key, value in request.headers.items():
environ["HTTP_" + key.replace("-", "_").upper()] = value
return environ
def _log(self, status_code: int, request: httputil.HTTPServerRequest) -> None:
if status_code < 400:
log_method = access_log.info
elif status_code < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * request.request_time()
assert request.method is not None
assert request.uri is not None
summary = (
request.method # type: ignore[operator]
+ " "
+ request.uri
+ " ("
+ request.remote_ip
+ ")"
)
log_method("%d %s %.2fms", status_code, summary, request_time)
HTTPRequest = httputil.HTTPServerRequest
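# A minimal usage sketch (illustrative, not part of this module), following the
# executor guidance in the WSGIContainer docstring; "simple_app" stands in for
# any thread-safe WSGI callable supplied by the caller.
def _example_threaded_container(simple_app: "WSGIAppType") -> WSGIContainer:
    from concurrent.futures import ThreadPoolExecutor
    # Run the WSGI application on a small thread pool instead of the IOLoop
    # thread, as recommended for anything beyond one request at a time.
    return WSGIContainer(simple_app, executor=ThreadPoolExecutor(max_workers=4))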
|
1fc8f644078738dc1a2183194e01ac98f16e2b44
|
f791462fb1286607d16459c1602d133f8d8c8b59
|
/test/test_flows.py
|
9e65cffbc5db768954deddc5627bbc0c229a48c6
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/numpyro
|
b071ed2bd93be41bafc3da8764c9f5617f996d92
|
ca96eca8e8e1531e71ba559ef7a8ad3b4b68cbc2
|
refs/heads/master
| 2023-09-03T15:56:13.252692
| 2023-08-28T14:32:25
| 2023-08-28T14:32:25
| 170,580,540
| 1,941
| 219
|
Apache-2.0
| 2023-09-04T11:26:11
| 2019-02-13T21:13:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,262
|
py
|
test_flows.py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import jacfwd, random
from jax.example_libraries import stax
from numpyro.distributions.flows import (
BlockNeuralAutoregressiveTransform,
InverseAutoregressiveTransform,
)
from numpyro.distributions.util import matrix_to_tril_vec
from numpyro.nn import AutoregressiveNN, BlockNeuralAutoregressiveNN
def _make_iaf_args(input_dim, hidden_dims):
_, rng_perm = random.split(random.PRNGKey(0))
perm = random.permutation(rng_perm, np.arange(input_dim))
# we use Elu nonlinearity because the default one, Relu, masks out negative hidden values,
    # which in turn creates some zero entries in the lower triangular part of the Jacobian.
arn_init, arn = AutoregressiveNN(
input_dim,
hidden_dims,
param_dims=[1, 1],
permutation=perm,
nonlinearity=stax.Elu,
)
_, init_params = arn_init(random.PRNGKey(0), (input_dim,))
return (partial(arn, init_params),)
def _make_bnaf_args(input_dim, hidden_factors):
arn_init, arn = BlockNeuralAutoregressiveNN(input_dim, hidden_factors)
_, rng_key_perm = random.split(random.PRNGKey(0))
_, init_params = arn_init(random.PRNGKey(0), (input_dim,))
return (partial(arn, init_params),)
@pytest.mark.parametrize(
"flow_class, flow_args, input_dim",
[
(InverseAutoregressiveTransform, _make_iaf_args(5, hidden_dims=[10]), 5),
(InverseAutoregressiveTransform, _make_iaf_args(7, hidden_dims=[8, 9]), 7),
(BlockNeuralAutoregressiveTransform, _make_bnaf_args(7, hidden_factors=[4]), 7),
(
BlockNeuralAutoregressiveTransform,
_make_bnaf_args(7, hidden_factors=[2, 3]),
7,
),
],
)
@pytest.mark.parametrize("batch_shape", [(), (1,), (4,), (2, 3)])
def test_flows(flow_class, flow_args, input_dim, batch_shape):
transform = flow_class(*flow_args)
x = random.normal(random.PRNGKey(0), batch_shape + (input_dim,))
# test inverse is correct
y = transform(x)
try:
inv = transform.inv(y)
assert_allclose(x, inv, atol=1e-5)
except NotImplementedError:
pass
# test jacobian shape
actual = transform.log_abs_det_jacobian(x, y)
assert np.shape(actual) == batch_shape
if batch_shape == ():
# make sure transform.log_abs_det_jacobian is correct
jac = jacfwd(transform)(x)
expected = np.linalg.slogdet(jac)[1]
assert_allclose(actual, expected, atol=1e-5)
# make sure jacobian is triangular, first permute jacobian as necessary
if isinstance(transform, InverseAutoregressiveTransform):
permuted_jac = np.zeros(jac.shape)
_, rng_key_perm = random.split(random.PRNGKey(0))
perm = random.permutation(rng_key_perm, np.arange(input_dim))
for j in range(input_dim):
for k in range(input_dim):
permuted_jac[j, k] = jac[perm[j], perm[k]]
jac = permuted_jac
assert np.sum(np.abs(np.triu(jac, 1))) == 0.00
assert np.all(np.abs(matrix_to_tril_vec(jac)) > 0)
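# Illustrative sketch (not part of the test suite): a transform like the ones
# tested above is typically wrapped in a TransformedDistribution to obtain a
# normalizing-flow density; nothing below runs at import time.
def _example_flow_density(input_dim=5):
    import jax.numpy as jnp
    import numpyro.distributions as dist
    transform = InverseAutoregressiveTransform(*_make_iaf_args(input_dim, hidden_dims=[10]))
    base = dist.Normal(jnp.zeros(input_dim), 1.0).to_event(1)
    flow = dist.TransformedDistribution(base, transform)
    x = flow.sample(random.PRNGKey(1))
    return flow.log_prob(x)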
|
b1159ce96ac1127853ed150801ad2a77bb3b2d01
|
0b134572e3ac3903ebb44df6d4138cbab9d3327c
|
/app/tests/archives_tests/test_permissions.py
|
0525cf6b3627c5f78003afe702ccd16f23acccfb
|
[
"Apache-2.0"
] |
permissive
|
comic/grand-challenge.org
|
660de3bafaf8f4560317f1dfd9ae9585ec272896
|
dac25f93b395974b32ba2a8a5f9e19b84b49e09d
|
refs/heads/main
| 2023-09-01T15:57:14.790244
| 2023-08-31T14:23:04
| 2023-08-31T14:23:04
| 4,557,968
| 135
| 53
|
Apache-2.0
| 2023-09-14T13:41:03
| 2012-06-05T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,256
|
py
|
test_permissions.py
|
import pytest
from django.conf import settings
from django.contrib.auth.models import Group
from guardian.shortcuts import get_perms, get_users_with_perms
from grandchallenge.archives.models import Archive
from grandchallenge.components.models import InterfaceKind
from tests.archives_tests.factories import ArchiveFactory, ArchiveItemFactory
from tests.components_tests.factories import ComponentInterfaceFactory
from tests.evaluation_tests.test_permissions import get_groups_with_set_perms
from tests.factories import UserFactory
from tests.utils import get_view_for_user
@pytest.mark.django_db
class TestArchivePermissions:
@pytest.mark.parametrize("public", (True, False))
def test_archive_permissions(self, public):
a: Archive = ArchiveFactory(public=public)
expected_perms = {
a.editors_group: {
"view_archive",
"use_archive",
"upload_archive",
"change_archive",
},
a.uploaders_group: {
"view_archive",
"use_archive",
"upload_archive",
},
a.users_group: {"view_archive", "use_archive"},
}
if public:
reg_and_anon = Group.objects.get(
name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME
)
expected_perms[reg_and_anon] = {"view_archive"}
assert get_groups_with_set_perms(a) == expected_perms
assert get_users_with_perms(a, with_group_users=False).count() == 0
@pytest.mark.django_db
def test_visible_to_public_group_permissions(self):
g_reg_anon = Group.objects.get(
name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME
)
a = ArchiveFactory()
assert "view_archive" not in get_perms(g_reg_anon, a)
a.public = True
a.save()
assert "view_archive" in get_perms(g_reg_anon, a)
a.public = False
a.save()
assert "view_archive" not in get_perms(g_reg_anon, a)
@pytest.mark.parametrize(
"add_to_group,status",
[
(Archive.add_user, 403),
(Archive.add_uploader, 200),
(Archive.add_editor, 200),
(None, 404),
],
)
@pytest.mark.django_db
def test_api_archive_item_update_permissions(
client, settings, add_to_group, status, django_capture_on_commit_callbacks
):
# Override the celery settings
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
archive = ArchiveFactory()
user = UserFactory()
item = ArchiveItemFactory(archive=archive)
if add_to_group:
add_to_group(archive, user)
ci = ComponentInterfaceFactory(
kind=InterfaceKind.InterfaceKindChoices.BOOL
)
with django_capture_on_commit_callbacks(execute=True):
response = get_view_for_user(
viewname="api:archives-item-detail",
reverse_kwargs={"pk": item.pk},
data={"values": [{"interface": ci.slug, "value": True}]},
user=user,
client=client,
method=client.patch,
content_type="application/json",
HTTP_X_FORWARDED_PROTO="https",
)
assert response.status_code == status
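# Illustrative sketch (an assumption, not the project's actual helper): the
# get_groups_with_set_perms helper imported above can be approximated with
# django-guardian's get_groups_with_perms.
def _example_groups_with_set_perms(obj):
    from guardian.shortcuts import get_groups_with_perms
    # Map each group to the set of object-level permission codenames it holds.
    return {
        group: set(perms)
        for group, perms in get_groups_with_perms(obj, attach_perms=True).items()
    }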
|
3fd70b0fc36ef0958e7f6196943efc46d0def2c5
|
4674b8088ffdf55905d44995f08a0792a3e4cd5c
|
/tests/hwsim/test_p2p_persistent.py
|
673fda3dd20285b77e6ef362c642df9f7eb63f34
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
vanhoefm/krackattacks-scripts
|
41daca791638a92aa4cfa68a582e46119037560e
|
4b78669686f74efe664c6543b1b5b1616b22f902
|
refs/heads/research
| 2022-10-29T20:21:11.512335
| 2022-10-16T18:44:41
| 2022-10-16T18:44:41
| 107,408,514
| 2,184
| 577
|
NOASSERTION
| 2021-07-06T12:43:49
| 2017-10-18T12:58:08
|
C
|
UTF-8
|
Python
| false
| false
| 29,199
|
py
|
test_p2p_persistent.py
|
# P2P persistent group test cases
# Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import re
import time
import hwsim_utils
from p2p_utils import *
@remote_compatible
def test_persistent_group(dev):
"""P2P persistent group formation and re-invocation"""
form(dev[0], dev[1])
invite_from_cli(dev[0], dev[1])
invite_from_go(dev[0], dev[1])
logger.info("Remove group on the client and try to invite from GO")
id = None
for n in dev[0].list_networks(p2p=True):
if "[P2P-PERSISTENT]" in n['flags']:
id = n['id']
break
if id is None:
raise Exception("Could not find persistent group entry")
clients = dev[0].global_request("GET_NETWORK " + id + " p2p_client_list").rstrip()
if dev[1].p2p_dev_addr() not in clients:
raise Exception("Peer missing from client list")
if "FAIL" not in dev[1].request("SELECT_NETWORK " + str(id)):
raise Exception("SELECT_NETWORK succeeded unexpectedly")
if "FAIL" not in dev[1].request("SELECT_NETWORK 1234567"):
raise Exception("SELECT_NETWORK succeeded unexpectedly(2)")
if "FAIL" not in dev[1].request("ENABLE_NETWORK " + str(id)):
raise Exception("ENABLE_NETWORK succeeded unexpectedly")
if "FAIL" not in dev[1].request("ENABLE_NETWORK 1234567"):
raise Exception("ENABLE_NETWORK succeeded unexpectedly(2)")
if "FAIL" not in dev[1].request("DISABLE_NETWORK " + str(id)):
raise Exception("DISABLE_NETWORK succeeded unexpectedly")
if "FAIL" not in dev[1].request("DISABLE_NETWORK 1234567"):
raise Exception("DISABLE_NETWORK succeeded unexpectedly(2)")
if "FAIL" not in dev[1].request("REMOVE_NETWORK 1234567"):
raise Exception("REMOVE_NETWORK succeeded unexpectedly")
dev[1].global_request("REMOVE_NETWORK all")
if len(dev[1].list_networks(p2p=True)) > 0:
raise Exception("Unexpected network block remaining")
invite(dev[0], dev[1])
ev = dev[0].wait_global_event(["P2P-INVITATION-RESULT"], timeout=10)
if ev is None:
raise Exception("No invitation result seen")
if "status=8" not in ev:
raise Exception("Unexpected invitation result: " + ev)
clients = dev[0].request("GET_NETWORK " + id + " p2p_client_list").rstrip()
if dev[1].p2p_dev_addr() in clients:
raise Exception("Peer was still in client list")
@remote_compatible
def test_persistent_group2(dev):
"""P2P persistent group formation with reverse roles"""
form(dev[0], dev[1], reverse_init=True)
invite_from_cli(dev[0], dev[1])
invite_from_go(dev[0], dev[1])
@remote_compatible
def test_persistent_group3(dev):
"""P2P persistent group formation and re-invocation with empty BSS table"""
form(dev[0], dev[1])
dev[1].request("BSS_FLUSH 0")
invite_from_cli(dev[0], dev[1])
dev[1].request("BSS_FLUSH 0")
invite_from_go(dev[0], dev[1])
def test_persistent_group_per_sta_psk(dev):
"""P2P persistent group formation and re-invocation using per-client PSK"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[0].global_request("P2P_SET per_sta_psk 1")
logger.info("Form a persistent group")
[i_res, r_res] = go_neg_pin_authorized_persistent(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if not i_res['persistent'] or not r_res['persistent']:
raise Exception("Formed group was not persistent")
logger.info("Join another client to the group")
pin = dev[2].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
social = int(i_res['freq']) in [ 2412, 2437, 2462 ]
c_res = dev[2].p2p_connect_group(addr0, pin, timeout=60, social=social,
freq=i_res['freq'])
if not c_res['persistent']:
raise Exception("Joining client did not recognize persistent group")
if r_res['psk'] == c_res['psk']:
raise Exception("Same PSK assigned for both clients")
hwsim_utils.test_connectivity_p2p(dev[1], dev[2])
logger.info("Remove persistent group and re-start it manually")
dev[0].remove_group()
dev[1].wait_go_ending_session()
dev[2].wait_go_ending_session()
dev[0].dump_monitor()
dev[1].dump_monitor()
dev[2].dump_monitor()
for i in range(0, 3):
networks = dev[i].list_networks(p2p=True)
if len(networks) != 1:
raise Exception("Unexpected number of networks")
if "[P2P-PERSISTENT]" not in networks[0]['flags']:
raise Exception("Not the persistent group data")
if i > 0:
# speed up testing by avoiding use of the old BSS entry since the
# GO may have changed channels
dev[i].request("BSS_FLUSH 0")
dev[i].scan(freq="2412", only_new=True)
if "OK" not in dev[i].global_request("P2P_GROUP_ADD persistent=" + networks[0]['id'] + " freq=2412"):
raise Exception("Could not re-start persistent group")
ev = dev[i].wait_global_event(["P2P-GROUP-STARTED"], timeout=30)
if ev is None:
raise Exception("Timeout on group restart")
dev[i].group_form_result(ev)
logger.info("Leave persistent group and rejoin it")
dev[2].remove_group()
ev = dev[2].wait_global_event(["P2P-GROUP-REMOVED"], timeout=3)
if ev is None:
raise Exception("Group removal event timed out")
if not dev[2].discover_peer(addr0, social=True):
raise Exception("Peer " + peer + " not found")
dev[2].dump_monitor()
peer = dev[2].get_peer(addr0)
dev[2].global_request("P2P_GROUP_ADD persistent=" + peer['persistent'] + " freq=2412")
ev = dev[2].wait_global_event(["P2P-GROUP-STARTED"], timeout=30)
if ev is None:
raise Exception("Timeout on group restart (on client)")
cli_res = dev[2].group_form_result(ev)
if not cli_res['persistent']:
raise Exception("Persistent group not restarted as persistent (cli)")
hwsim_utils.test_connectivity_p2p(dev[1], dev[2])
logger.info("Remove one of the clients from the group without removing persistent group information for the client")
dev[0].global_request("P2P_REMOVE_CLIENT iface=" + dev[2].p2p_interface_addr())
dev[2].wait_go_ending_session()
logger.info("Try to reconnect after having been removed from group (but persistent group info still present)")
if not dev[2].discover_peer(addr0, social=True):
raise Exception("Peer " + peer + " not found")
dev[2].dump_monitor()
peer = dev[2].get_peer(addr0)
dev[2].global_request("P2P_GROUP_ADD persistent=" + peer['persistent'] + " freq=2412")
ev = dev[2].wait_global_event(["P2P-GROUP-STARTED","WPA: 4-Way Handshake failed"], timeout=30)
if ev is None:
raise Exception("Timeout on group restart (on client)")
if "P2P-GROUP-STARTED" not in ev:
raise Exception("Connection failed")
logger.info("Remove one of the clients from the group")
dev[0].global_request("P2P_REMOVE_CLIENT " + addr2)
dev[2].wait_go_ending_session()
logger.info("Try to reconnect after having been removed from group")
if not dev[2].discover_peer(addr0, social=True):
raise Exception("Peer " + peer + " not found")
dev[2].dump_monitor()
peer = dev[2].get_peer(addr0)
dev[2].global_request("P2P_GROUP_ADD persistent=" + peer['persistent'] + " freq=2412")
ev = dev[2].wait_global_event(["P2P-GROUP-STARTED","WPA: 4-Way Handshake failed"], timeout=30)
if ev is None:
raise Exception("Timeout on group restart (on client)")
if "P2P-GROUP-STARTED" in ev:
raise Exception("Client managed to connect after being removed")
logger.info("Remove the remaining client from the group")
dev[0].global_request("P2P_REMOVE_CLIENT " + addr1)
dev[1].wait_go_ending_session()
logger.info("Terminate persistent group")
dev[0].remove_group()
dev[0].dump_monitor()
logger.info("Try to re-invoke persistent group from client")
dev[0].global_request("SET persistent_reconnect 1")
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0, social=True):
raise Exception("Peer " + peer + " not found")
dev[1].dump_monitor()
peer = dev[1].get_peer(addr0)
dev[1].global_request("P2P_INVITE persistent=" + peer['persistent'] + " peer=" + addr0)
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=30)
dev[0].group_form_result(ev)
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED","WPA: 4-Way Handshake failed"], timeout=30)
if ev is None:
raise Exception("Timeout on group restart (on client)")
if "P2P-GROUP-STARTED" in ev:
raise Exception("Client managed to re-invoke after being removed")
dev[0].dump_monitor()
logger.info("Terminate persistent group")
dev[0].remove_group()
dev[0].dump_monitor()
def test_persistent_group_invite_removed_client(dev):
"""P2P persistent group client removal and re-invitation"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].request("P2P_SET per_sta_psk 1")
logger.info("Form a persistent group")
[i_res, r_res] = go_neg_pin_authorized_persistent(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if not i_res['persistent'] or not r_res['persistent']:
raise Exception("Formed group was not persistent")
logger.info("Remove client from the group")
dev[0].global_request("P2P_REMOVE_CLIENT " + addr1)
dev[1].wait_go_ending_session()
logger.info("Re-invite the removed client to join the group")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1, social=True):
raise Exception("Peer " + peer + " not found")
dev[0].global_request("P2P_INVITE group=" + dev[0].group_ifname + " peer=" + addr1)
ev = dev[1].wait_global_event(["P2P-INVITATION-RECEIVED"], timeout=10)
if ev is None:
raise Exception("Timeout on invitation")
if "sa=" + addr0 + " persistent=" not in ev:
raise Exception("Unexpected invitation event")
[event,addr,persistent] = ev.split(' ', 2)
dev[1].global_request("P2P_GROUP_ADD " + persistent)
ev = dev[1].wait_global_event(["P2P-PERSISTENT-PSK-FAIL"], timeout=30)
if ev is None:
raise Exception("Did not receive PSK failure report")
[tmp,id] = ev.split('=', 1)
ev = dev[1].wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
if ev is None:
raise Exception("Group removal event timed out")
if "reason=PSK_FAILURE" not in ev:
raise Exception("Unexpected group removal reason")
dev[1].global_request("REMOVE_NETWORK " + id)
logger.info("Re-invite after client removed persistent group info")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1, social=True):
raise Exception("Peer " + peer + " not found")
dev[0].global_request("P2P_INVITE group=" + dev[0].group_ifname + " peer=" + addr1)
ev = dev[1].wait_global_event(["P2P-INVITATION-RECEIVED"], timeout=10)
if ev is None:
raise Exception("Timeout on invitation")
if " persistent=" in ev:
raise Exception("Unexpected invitation event")
pin = dev[1].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
c_res = dev[1].p2p_connect_group(addr0, pin, timeout=60, social=True,
freq=i_res['freq'])
if not c_res['persistent']:
raise Exception("Joining client did not recognize persistent group")
if r_res['psk'] == c_res['psk']:
raise Exception("Same PSK assigned on both times")
hwsim_utils.test_connectivity_p2p(dev[0], dev[1])
terminate_group(dev[0], dev[1])
@remote_compatible
def test_persistent_group_channel(dev):
"""P2P persistent group re-invocation with channel selection"""
form(dev[0], dev[1], test_data=False)
logger.info("Re-invoke persistent group from client with forced channel")
invite(dev[1], dev[0], "freq=2427")
[go_res, cli_res] = check_result(dev[0], dev[1])
if go_res['freq'] != "2427":
raise Exception("Persistent group client forced channel not followed")
terminate_group(dev[0], dev[1])
logger.info("Re-invoke persistent group from GO with forced channel")
invite(dev[0], dev[1], "freq=2432")
[go_res, cli_res] = check_result(dev[0], dev[1])
if go_res['freq'] != "2432":
raise Exception("Persistent group GO channel preference not followed")
terminate_group(dev[0], dev[1])
logger.info("Re-invoke persistent group from client with channel preference")
invite(dev[1], dev[0], "pref=2417")
[go_res, cli_res] = check_result(dev[0], dev[1])
if go_res['freq'] != "2417":
raise Exception("Persistent group client channel preference not followed")
terminate_group(dev[0], dev[1])
@remote_compatible
def test_persistent_group_and_role_change(dev):
"""P2P persistent group, auto GO in another role, and re-invocation"""
form(dev[0], dev[1])
logger.info("Start and stop autonomous GO on previous P2P client device")
dev[1].p2p_start_go()
dev[1].remove_group()
dev[1].dump_monitor()
logger.info("Re-invoke the persistent group")
invite_from_go(dev[0], dev[1])
def test_persistent_go_client_list(dev):
"""P2P GO and list of clients in persistent group"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
res = dev[0].p2p_start_go(persistent=True)
id = None
for n in dev[0].list_networks(p2p=True):
if "[P2P-PERSISTENT]" in n['flags']:
id = n['id']
break
if id is None:
raise Exception("Could not find persistent group entry")
connect_cli(dev[0], dev[1], social=True, freq=res['freq'])
clients = dev[0].global_request("GET_NETWORK " + id + " p2p_client_list").rstrip()
if clients != addr1:
raise Exception("Unexpected p2p_client_list entry(2): " + clients)
connect_cli(dev[0], dev[2], social=True, freq=res['freq'])
clients = dev[0].global_request("GET_NETWORK " + id + " p2p_client_list").rstrip()
if clients != addr2 + " " + addr1:
raise Exception("Unexpected p2p_client_list entry(3): " + clients)
peer = dev[1].get_peer(res['go_dev_addr'])
dev[1].remove_group()
dev[1].global_request("P2P_GROUP_ADD persistent=" + peer['persistent'])
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=30)
if ev is None:
raise Exception("Timeout on group restart (on client)")
dev[1].group_form_result(ev)
clients = dev[0].global_request("GET_NETWORK " + id + " p2p_client_list").rstrip()
if clients != addr1 + " " + addr2:
raise Exception("Unexpected p2p_client_list entry(4): " + clients)
dev[2].remove_group()
dev[1].remove_group()
dev[0].remove_group()
clients = dev[0].global_request("GET_NETWORK " + id + " p2p_client_list").rstrip()
if clients != addr1 + " " + addr2:
raise Exception("Unexpected p2p_client_list entry(5): " + clients)
dev[1].p2p_listen()
dev[2].p2p_listen()
dev[0].request("P2P_FLUSH")
dev[0].discover_peer(addr1, social=True)
peer = dev[0].get_peer(addr1)
if 'persistent' not in peer or peer['persistent'] != id:
raise Exception("Persistent group client not recognized(1)")
dev[0].discover_peer(addr2, social=True)
peer = dev[0].get_peer(addr2)
if 'persistent' not in peer or peer['persistent'] != id:
raise Exception("Persistent group client not recognized(2)")
@remote_compatible
def test_persistent_group_in_grpform(dev):
"""P2P persistent group parameters re-used in group formation"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
form(dev[0], dev[1])
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1, social=True):
raise Exception("Could not discover peer")
peer = dev[0].get_peer(addr1)
if "persistent" not in peer:
raise Exception("Could not map peer to a persistent group")
pin = dev[1].wps_read_pin()
dev[1].p2p_go_neg_auth(addr0, pin, "display", go_intent=0)
i_res = dev[0].p2p_go_neg_init(addr1, pin, "enter", timeout=20,
go_intent=15,
persistent_id=peer['persistent'])
r_res = dev[1].p2p_go_neg_auth_result()
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
@remote_compatible
def test_persistent_group_without_persistent_reconnect(dev):
"""P2P persistent group re-invocation without persistent reconnect"""
form(dev[0], dev[1])
dev[0].dump_monitor()
dev[1].dump_monitor()
logger.info("Re-invoke persistent group from client")
invite(dev[1], dev[0], persistent_reconnect=False)
ev = dev[0].wait_global_event(["P2P-INVITATION-RECEIVED"], timeout=15)
if ev is None:
raise Exception("No invitation request reported")
if "persistent=" not in ev:
raise Exception("Invalid invitation type reported: " + ev)
ev2 = dev[1].wait_global_event(["P2P-INVITATION-RESULT"], timeout=15)
if ev2 is None:
raise Exception("No invitation response reported")
if "status=1" not in ev2:
raise Exception("Unexpected status: " + ev2)
dev[1].p2p_listen()
exp = r'<.>(P2P-INVITATION-RECEIVED) sa=([0-9a-f:]*) persistent=([0-9]*) freq=([0-9]*)'
s = re.split(exp, ev)
if len(s) < 5:
raise Exception("Could not parse invitation event")
sa = s[2]
id = s[3]
freq = s[4]
logger.info("Invalid P2P_INVITE test coverage")
if "FAIL" not in dev[0].global_request("P2P_INVITE persistent=" + id + " peer=" + sa + " freq=0"):
raise Exception("Invalid P2P_INVITE accepted")
if "FAIL" not in dev[0].global_request("P2P_INVITE persistent=" + id + " peer=" + sa + " pref=0"):
raise Exception("Invalid P2P_INVITE accepted")
logger.info("Re-initiate invitation based on upper layer acceptance")
if "OK" not in dev[0].global_request("P2P_INVITE persistent=" + id + " peer=" + sa + " freq=" + freq):
raise Exception("Invitation command failed")
[go_res, cli_res] = check_result(dev[0], dev[1])
if go_res['freq'] != freq:
raise Exception("Unexpected channel on GO: {} MHz, expected {} MHz".format(go_res['freq'], freq))
if cli_res['freq'] != freq:
raise Exception("Unexpected channel on CLI: {} MHz, expected {} MHz".format(cli_res['freq'], freq))
terminate_group(dev[0], dev[1])
dev[0].dump_monitor()
dev[1].dump_monitor()
logger.info("Re-invoke persistent group from GO")
invite(dev[0], dev[1], persistent_reconnect=False)
ev = dev[1].wait_global_event(["P2P-INVITATION-RECEIVED"], timeout=15)
if ev is None:
raise Exception("No invitation request reported")
if "persistent=" not in ev:
raise Exception("Invalid invitation type reported: " + ev)
ev2 = dev[0].wait_global_event(["P2P-INVITATION-RESULT"], timeout=15)
if ev2 is None:
raise Exception("No invitation response reported")
if "status=1" not in ev2:
raise Exception("Unexpected status: " + ev2)
dev[0].p2p_listen()
exp = r'<.>(P2P-INVITATION-RECEIVED) sa=([0-9a-f:]*) persistent=([0-9]*)'
s = re.split(exp, ev)
if len(s) < 4:
raise Exception("Could not parse invitation event")
sa = s[2]
id = s[3]
logger.info("Re-initiate invitation based on upper layer acceptance")
if "OK" not in dev[1].global_request("P2P_INVITE persistent=" + id + " peer=" + sa + " freq=" + freq):
raise Exception("Invitation command failed")
[go_res, cli_res] = check_result(dev[0], dev[1])
terminate_group(dev[0], dev[1])
@remote_compatible
def test_persistent_group_already_running(dev):
"""P2P persistent group formation and invitation while GO already running"""
form(dev[0], dev[1])
peer = dev[1].get_peer(dev[0].p2p_dev_addr())
listen_freq = peer['listen_freq']
dev[0].dump_monitor()
dev[1].dump_monitor()
networks = dev[0].list_networks(p2p=True)
if len(networks) != 1:
raise Exception("Unexpected number of networks")
if "[P2P-PERSISTENT]" not in networks[0]['flags']:
raise Exception("Not the persistent group data")
if "OK" not in dev[0].global_request("P2P_GROUP_ADD persistent=" + networks[0]['id'] + " freq=" + listen_freq):
raise Exception("Could not state GO")
invite_from_cli(dev[0], dev[1])
@remote_compatible
def test_persistent_group_add_cli_chan(dev):
"""P2P persistent group formation and re-invocation with p2p_add_cli_chan=1"""
try:
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
form(dev[0], dev[1])
dev[1].request("BSS_FLUSH 0")
dev[1].scan(freq="2412", only_new=True)
dev[1].scan(freq="2437", only_new=True)
dev[1].scan(freq="2462", only_new=True)
dev[1].request("BSS_FLUSH 0")
invite_from_cli(dev[0], dev[1])
invite_from_go(dev[0], dev[1])
finally:
dev[0].request("SET p2p_add_cli_chan 0")
dev[1].request("SET p2p_add_cli_chan 0")
@remote_compatible
def test_persistent_invalid_group_add(dev):
"""Invalid P2P_GROUP_ADD command"""
id = dev[0].add_network()
if "FAIL" not in dev[0].global_request("P2P_GROUP_ADD persistent=12345"):
raise Exception("Invalid P2P_GROUP_ADD accepted")
if "FAIL" not in dev[0].global_request("P2P_GROUP_ADD persistent=%d" % id):
raise Exception("Invalid P2P_GROUP_ADD accepted")
if "FAIL" not in dev[0].global_request("P2P_GROUP_ADD foo"):
raise Exception("Invalid P2P_GROUP_ADD accepted")
def test_persistent_group_missed_inv_resp(dev):
"""P2P persistent group re-invocation with invitation response getting lost"""
form(dev[0], dev[1])
addr = dev[1].p2p_dev_addr()
dev[1].global_request("SET persistent_reconnect 1")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr, social=True):
raise Exception("Peer " + addr + " not found")
dev[0].dump_monitor()
peer = dev[0].get_peer(addr)
# Drop the first Invitation Response frame
if "FAIL" in dev[0].request("SET ext_mgmt_frame_handling 1"):
raise Exception("Failed to enable external management frame handling")
cmd = "P2P_INVITE persistent=" + peer['persistent'] + " peer=" + addr
dev[0].global_request(cmd)
rx_msg = dev[0].mgmt_rx()
if rx_msg is None:
raise Exception("MGMT-RX timeout (no Invitation Response)")
time.sleep(2)
# Allow following Invitation Response frame to go through
if "FAIL" in dev[0].request("SET ext_mgmt_frame_handling 0"):
raise Exception("Failed to disable external management frame handling")
time.sleep(1)
# Force the P2P Client side to be on its Listen channel for retry
dev[1].p2p_listen()
ev = dev[0].wait_global_event(["P2P-INVITATION-RESULT"], timeout=15)
if ev is None:
raise Exception("Invitation result timed out")
# Allow P2P Client side to continue connection-to-GO attempts
dev[1].p2p_stop_find()
# Verify that group re-invocation goes through
ev = dev[1].wait_global_event([ "P2P-GROUP-STARTED",
"P2P-GROUP-FORMATION-FAILURE" ],
timeout=20)
if ev is None:
raise Exception("Group start event timed out")
if "P2P-GROUP-STARTED" not in ev:
raise Exception("Group re-invocation failed")
dev[0].group_form_result(ev)
ev = dev[0].wait_global_event([ "P2P-GROUP-STARTED" ], timeout=5)
if ev is None:
raise Exception("Group start event timed out on GO")
dev[0].group_form_result(ev)
terminate_group(dev[0], dev[1])
@remote_compatible
def test_persistent_group_profile_add(dev):
"""Create a P2P persistent group with ADD_NETWORK"""
passphrase="passphrase here"
id = dev[0].p2pdev_add_network()
dev[0].p2pdev_set_network_quoted(id, "ssid", "DIRECT-ab")
dev[0].p2pdev_set_network_quoted(id, "psk", passphrase)
dev[0].p2pdev_set_network(id, "mode", "3")
dev[0].p2pdev_set_network(id, "disabled", "2")
dev[0].p2p_start_go(persistent=id, freq=2412)
pin = dev[1].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
res = dev[1].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60,
social=True, freq=2412)
if res['result'] != 'success':
raise Exception("Joining the group did not succeed")
dev[0].remove_group()
dev[1].wait_go_ending_session()
@remote_compatible
def test_persistent_group_cancel_on_cli(dev):
"""P2P persistent group formation, re-invocation, and cancel"""
dev[0].global_request("SET p2p_no_group_iface 0")
dev[1].global_request("SET p2p_no_group_iface 0")
form(dev[0], dev[1])
invite_from_go(dev[0], dev[1], terminate=False)
if "FAIL" not in dev[1].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on CLI")
if "FAIL" not in dev[0].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on GO")
terminate_group(dev[0], dev[1])
invite_from_cli(dev[0], dev[1], terminate=False)
if "FAIL" not in dev[1].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on CLI")
if "FAIL" not in dev[0].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on GO")
terminate_group(dev[0], dev[1])
@remote_compatible
def test_persistent_group_cancel_on_cli2(dev):
"""P2P persistent group formation, re-invocation, and cancel (2)"""
form(dev[0], dev[1])
invite_from_go(dev[0], dev[1], terminate=False)
if "FAIL" not in dev[1].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on CLI")
if "FAIL" not in dev[0].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on GO")
terminate_group(dev[0], dev[1])
invite_from_cli(dev[0], dev[1], terminate=False)
if "FAIL" not in dev[1].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on CLI")
if "FAIL" not in dev[0].global_request("P2P_CANCEL"):
raise Exception("P2P_CANCEL succeeded unexpectedly on GO")
terminate_group(dev[0], dev[1])
@remote_compatible
def test_persistent_group_peer_dropped(dev):
"""P2P persistent group formation and re-invocation with peer having dropped group"""
form(dev[0], dev[1], reverse_init=True)
invite_from_cli(dev[0], dev[1])
logger.info("Remove group on the GO and try to invite from the client")
dev[0].global_request("REMOVE_NETWORK all")
invite(dev[1], dev[0])
ev = dev[1].wait_global_event(["P2P-INVITATION-RESULT"], timeout=10)
if ev is None:
raise Exception("No invitation result seen")
if "status=8" not in ev:
raise Exception("Unexpected invitation result: " + ev)
networks = dev[1].list_networks(p2p=True)
if len(networks) > 0:
raise Exception("Unexpected network block on client")
logger.info("Verify that a new group can be formed")
form(dev[0], dev[1], reverse_init=True)
@remote_compatible
def test_persistent_group_peer_dropped2(dev):
"""P2P persistent group formation and re-invocation with peer having dropped group (2)"""
form(dev[0], dev[1])
invite_from_go(dev[0], dev[1])
logger.info("Remove group on the client and try to invite from the GO")
dev[1].global_request("REMOVE_NETWORK all")
invite(dev[0], dev[1])
ev = dev[0].wait_global_event(["P2P-INVITATION-RESULT"], timeout=10)
if ev is None:
raise Exception("No invitation result seen")
if "status=8" not in ev:
raise Exception("Unexpected invitation result: " + ev)
networks = dev[1].list_networks(p2p=True)
if len(networks) > 0:
raise Exception("Unexpected network block on client")
logger.info("Verify that a new group can be formed")
form(dev[0], dev[1])
def test_persistent_group_peer_dropped3(dev):
"""P2P persistent group formation and re-invocation with peer having dropped group (3)"""
form(dev[0], dev[1], reverse_init=True)
invite_from_cli(dev[0], dev[1])
logger.info("Remove group on the GO and try to invite from the client")
dev[0].global_request("REMOVE_NETWORK all")
invite(dev[1], dev[0], use_listen=False)
ev = dev[1].wait_global_event(["P2P-INVITATION-RESULT"], timeout=10)
if ev is None:
raise Exception("No invitation result seen")
if "status=8" not in ev:
raise Exception("Unexpected invitation result: " + ev)
networks = dev[1].list_networks(p2p=True)
if len(networks) > 0:
raise Exception("Unexpected network block on client")
time.sleep(0.2)
logger.info("Verify that a new group can be formed")
form(dev[0], dev[1], reverse_init=True, r_listen=False)
|
c4af4e6f2e0818472233e2f5f6a02fa01d196cf6
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/comware/show_interface.py
|
65433a0c1a4e90f4f2d8aabe69b0af30def6ccf8
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 37,199
|
py
|
show_interface.py
|
'''
Author: Renato Almeida de Oliveira
Contact: renato.almeida.oliveira@gmail.com
'''
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, Or, Optional
from genie.libs.parser.utils.common import Common
from genie import parsergen
from genie.libs.parser.iosxe.show_interface import ShowInterfacesSchema
import re
# ======================================================
# Schema for 'display ip interface'
# ======================================================
class DisplayInterfaceSchema(MetaParser):
"""schema for display interface
display interface <interface>
"""
schema = {
Any(): {
'oper_status': str,
'enabled': bool,
Optional('description'): str,
'type': str,
Optional('frame_type'): str,
Optional('ipv6_frame_type'): str,
Optional('port_speed'): str,
Optional('duplex_mode'): str,
Optional('media_type'): str,
Optional('port_type'): str,
Optional('mtu'): int,
Optional('max_frame_length'): int,
Optional('pvid'): int,
Optional('mac_address'): str,
Optional('ipv6_mac_address'): str,
Optional('auto_negotiate'): bool,
Optional('priority'): int,
Optional('counters'):
{Optional('rate'):
{Optional('load_interval'): int,
Optional('in_rate_pkts'): int,
Optional('out_rate_pkts'): int,
Optional('in_rate_bytes'): int,
Optional('out_rate_bytes'): int,
},
Optional('normal'):
{Optional('in_pkts'): int,
Optional('in_octets'): int,
Optional('out_pkts'): int,
Optional('out_octets'): int,
Optional('in_unicast_pkts'): int,
Optional('in_broadcast_pkts'): int,
Optional('in_multicast_pkts'): int,
Optional('in_mac_pause_frames'): int,
Optional('out_unicast_pkts'): int,
Optional('out_broadcast_pkts'): int,
Optional('out_multicast_pkts'): int,
Optional('out_mac_pause_frames'): int,
},
Optional('out_unicast_pkts'): int,
Optional('out_errors'): int,
Optional('out_collision'): int,
Optional('out_lost_carrier'): int,
Optional('out_no_carrier'): int,
Optional('in_multicast_pkts'): int,
Optional('in_unicast_pkts'): int,
Optional('out_broadcast_pkts'): int,
Optional('out_abort'): int,
Optional('in_errors'): int,
Optional('in_parity_errors'): int,
Optional('in_ignored'): int,
Optional('in_throttles'): int,
Optional('in_overrun'): int,
Optional('out_mac_pause_frames'): int,
Optional('out_deferred'): int,
Optional('in_mac_pause_frames'): int,
Optional('out_octets'): int,
Optional('in_octets'): int,
Optional('in_runts'): int,
Optional('out_multicast_pkts'): int,
Optional('in_frame'): int,
Optional('in_broadcast_pkts'): int,
Optional('out_buffer_failure'): int,
Optional('out_pkts'): int,
Optional('out_late_collision'): int,
Optional('in_giants'): int,
Optional('out_underruns'): int,
Optional('in_crc_errors'): int,
Optional('in_abort'): int,
Optional('in_pkts'): int,
Optional('last_clear'): str,
},
Optional('switchport'): {
Optional('mode'): str,
Optional('tagged'): int,
Optional('untagged'): int,
Optional('vlan_passing'): list,
Optional('vlan_permitted'): list,
Optional('encapsulation'): str,
},
Optional('ipv4'):
{Any():
{Optional('ip'): str,
Optional('prefix_length'): str,
Optional('secondary'): bool
}
},
}
}
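# Illustrative example (an assumption, not taken from the parser's golden
# outputs) of a parsed structure that satisfies the schema above:
# {
#     'GigabitEthernet1/0/1': {
#         'oper_status': 'UP',
#         'enabled': True,
#         'type': 'GigabitEthernet',
#         'mtu': 1500,
#         'counters': {'rate': {'load_interval': 300,
#                               'in_rate_pkts': 0, 'in_rate_bytes': 0,
#                               'out_rate_pkts': 0, 'out_rate_bytes': 0}},
#     }
# }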
class DisplayInterfaces(DisplayInterfaceSchema):
"""parser for display interface
display interface <interface>
"""
##########################################################
    # Other platforms' equivalent command
##########################################################
platform_equivalent_cli = ['show interfaces',
'show interfaces {interface}']
##########################################################
# HP Comware command
##########################################################
platform_cli = ['display interface',
'display interface {interface}']
cli_command = platform_equivalent_cli + platform_cli
exclude = []
def cli(self, interface="", output=None):
if output is None:
if interface:
cmd = self.platform_cli[1].format(interface=interface)
else:
cmd = self.platform_cli[0]
out = self.device.execute(cmd)
else:
out = output
# GigabitEthernet3/8/0/38 current state: DOWN
p1 = re.compile(r'^ *(?P<interface>[\w\/\.\-]+) current state: (?P<enabled>[\(?\w\s\)?]+)$')
# IP Packet Frame Type: PKTFMT_ETHNT_2, Hardware Address: aaaa-bbbb-cccc
p2_0 = re.compile(r'^IP Packet Frame Type: (?P<frame_type>\w+), +Hardware Address: (?P<mac_address>[a-z0-9-]+)$')
# IPv6 Packet Frame Type: PKTFMT_ETHNT_2, Hardware Address: aaaa-bbbb-cccc
p2_1 = re.compile(r'^IPv6 Packet Frame Type: (?P<frame_type>\w+), +Hardware Address: (?P<mac_address>[a-z0-9-]+)$')
# Description:
p3 = re.compile(r'^Description: *(?P<description>.*)$')
# Media type is twisted pair
p4 = re.compile(r'^Media type is (?P<media_type>.*?)($|,.*$)')
# Port hardware type is 1000_BASE_T
p5 = re.compile(r'Port hardware type is\s+(?P<port_type>[\w+_?]+)')
# Unknown-speed mode, unknown-duplex mode
p6 = re.compile(r'^(?P<port_speed>\w+[Mbps]?)-speed mode, (?P<duplex_mode>\w+)[\-\s]+[d|D]uplex mode$')
# Link speed type is autonegotiation, link duplex type is autonegotiation
p7 = re.compile(r'^Link speed type is (?P<speed_type>\w+), link duplex type is (?P<duplex_type>\w+)$')
# The Maximum Frame Length is 9216
p8_0 = re.compile(r'^The Maximum Frame Length is (?P<max_frame_length>\d+)$')
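        # The Maximum Transmit Unit is 1500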
p8_1 = re.compile(r'^The Maximum Transmit Unit is (?P<mtu>\d+)$')
# Internet Address is 192.168.0.1/24 Primary
p9 = re.compile(r'^Internet *Address *is *(?P<ipv4>(?P<ip>[0-9\.x]+)\/(?P<prefix_length>[0-9]+)) (?P<type>\w+)$')
# PVID: 17
p10 = re.compile(r'^PVID: *(?P<pvid>\d+)$')
# Port link-type: access
p11 = re.compile(r'^\s*Port link-type: (?P<switchport_mode>\w+)$')
# Tagged VLAN ID : none
p12 = re.compile(r'^Tagged +VLAN ID : (?P<tagged>\w+)$')
# Untagged VLAN ID : 123
p13 = re.compile(r'^Untagged +VLAN ID : (?P<untagged>\w+)$')
# VLAN passing : 1(default vlan), 3, 5, 7, 9
p14 = re.compile(r'^ *VLAN passing *: (?P<passing>(.*))$')
# VLAN permitted : 1(default vlan), 3, 5, 7, 9
p15 = re.compile(r'^ *VLAN permitted *: (?P<permitted>(.*))$')
# Trunk port encapsulation: IEEE 802.1q
p16 = re.compile(r'^ *Trunk port encapsulation: (?P<encapsulation>.*)$')
# Port priority: 0
p17 = re.compile(r'^ *Port priority: (?P<priority>\d+)$')
# Last clearing of counters: Never
p18 = re.compile(r'^ *Last clearing of counters: *(?P<last_clear>.*)$')
# Last 300 seconds input: 0 packets/sec 0 bytes/sec -%
p19 = re.compile(r'Last (?P<load_interval>[0-9\#]+) *(?P<unit>(minute|second|minutes|seconds)) input: *(?P<in_rate_pkts>[0-9]+) packets\/sec *(?P<in_rate_bytes>[0-9]+) *bytes\/sec *.*%$')
# Last 300 seconds output: 0 packets/sec 0 bytes/sec -%
p20 = re.compile(r'Last (?P<load_interval>[0-9\#]+) *(?P<unit>(minute|second|minutes|seconds)) output: *(?P<out_rate_pkts>[0-9]+) packets\/sec *(?P<out_rate_bytes>[0-9]+) *bytes\/sec *.*%$')
# Input (total): 7446905 packets, 10280397282 bytes
p21_0 = re.compile(r'^ *Input \((?P<type>\w+)\): *(?P<packets>.*) packets, (?P<bytes>.*) bytes$')
# Output (total): 40981139 packets, 44666966188 bytes
p21_1 = re.compile(r'^ *Output \((?P<type>\w+)\): *(?P<packets>.*) packets, (?P<bytes>.*) bytes$')
# 7426948 unicasts, 1093 broadcasts, 18864 multicasts, 0 pauses
p22 = re.compile(r'^ *(?P<unicasts>.*) unicasts, (?P<broadcasts>.*) broadcasts, (?P<multicasts>.*) multicasts, (?P<pauses>.*) pauses$')
# Input: 0 input errors, 0 runts, 0 giants, 0 throttles
p23 = re.compile(r'^ *Input: *(?P<in_errors>.*) input errors, (?P<in_runts>.*) runts, (?P<in_giants>.*) giants, (?P<in_throttles>.*) throttles$')
# 0 CRC, 0 frame, - overruns, 0 aborts
p24 = re.compile(r'^ *(?P<in_crc_errors>.*) CRC, (?P<in_frame>.*) frame, (?P<in_overrun>.*) overruns, (?P<in_abort>.*) aborts$')
# - ignored, - parity errors
p25 = re.compile(r'^ *(?P<in_ignored>.*) ignored, (?P<in_parity_errors>.*) parity errors$')
# Output: 0 output errors, - underruns, - buffer failures
p26 = re.compile(r'^ *Output: *(?P<out_errors>.*) output errors, (?P<out_underruns>.*) underruns, (?P<out_buffer_failure>.*) buffer failures$')
# aborts, 0 deferred, 0 collisions, 0 late collisions
p27 = re.compile(r'^ *(?P<out_abort>.*) aborts, (?P<out_deferred>.*) deferred, (?P<out_collision>.*) collisions, (?P<out_late_collision>.*) late collisions$')
# 0 lost carrier, - no carrier
p28 = re.compile(r'^ *(?P<out_lost_carrier>.*) lost carrier, (?P<out_no_carrier>.*) no carrier$')
interface_dict = {}
for line in out.splitlines():
line = line.strip()
# r'^ *(?P<interface>[\w\/\.\-]+) current state: (?P<enabled>[\(?\w\s\)?]+)$'
m = p1.match(line)
if m:
interface = m.groupdict()['interface']
if interface not in interface_dict:
interface_dict[interface] = {}
p_type = re.compile(r'(?P<type>[a-zA-Z\-\s\+]+)')
m_type = p_type.match(interface)
if_type = m_type.groupdict()['type']
interface_dict[interface]['type'] = if_type
enabled = m.groupdict()['enabled']
if 'DOWN ( Administratively )' in enabled:
interface_dict[interface]['enabled'] = False
interface_dict[interface]['oper_status'] = 'DOWN'
else:
interface_dict[interface]['enabled'] = True
interface_dict[interface]['oper_status'] = enabled
continue
# r'^IP Packet Frame Type: (?P<frame_type>\w+), +Hardware Address: (?P<mac_address>[a-z0-9-]+)$'
m = p2_0.match(line)
if m:
frame_type = m.groupdict()['frame_type']
mac_address = m.groupdict()['mac_address']
interface_dict[interface]['frame_type'] = frame_type
interface_dict[interface]['mac_address'] = mac_address
continue
# r'^IPv6 Packet Frame Type: (?P<frame_type>\w+), +Hardware Address: (?P<mac_address>[a-z0-9-]+)$'
m = p2_1.match(line)
if m:
frame_type = m.groupdict()['frame_type']
mac_address = m.groupdict()['mac_address']
interface_dict[interface]['ipv6_frame_type'] = frame_type
interface_dict[interface]['ipv6_mac_address'] = mac_address
continue
# r'^Description: *(?P<description>.*)$'
m = p3.match(line)
if m:
description = m.groupdict()['description']
interface_dict[interface]['description'] = description
continue
# r'^Media type is (?P<media_type>.*?)($|,.*$)'
m = p4.match(line)
if m:
media_type = m.groupdict()['media_type']
interface_dict[interface]['media_type'] = media_type
continue
            # r'Port hardware type is\s+(?P<port_type>[\w+_?]+)'
m = p5.match(line)
if m:
port_type = m.groupdict()['port_type']
interface_dict[interface]['port_type'] = port_type
continue
            # r'^(?P<port_speed>\w+[Mbps]?)-speed mode, (?P<duplex_mode>\w+)[\-\s]+[d|D]uplex mode$'
m = p6.match(line)
if m:
port_speed = m.groupdict()['port_speed'].lower()
duplex_mode = m.groupdict()['duplex_mode'].lower()
interface_dict[interface]['duplex_mode'] = duplex_mode
interface_dict[interface]['port_speed'] = port_speed
continue
# r'^Link speed type is (?P<speed_type>\w+), link duplex type is (?P<duplex_type>\w+)$'
m = p7.match(line)
if m:
speed_type = m.groupdict()['speed_type'].lower()
duplex_type = m.groupdict()['duplex_type'].lower()
                if speed_type == 'autonegotiation':
interface_dict[interface]['auto_negotiate'] = True
else:
interface_dict[interface]['auto_negotiate'] = False
continue
# r'^The Maximum Frame Length is (?P<max_frame_length>\d+)$'
m = p8_0.match(line)
if m:
max_frame_length = m.groupdict()['max_frame_length']
interface_dict[interface]['max_frame_length'] = int(max_frame_length)
continue
# r'^The Maximum Transmit Unit is (?P<mtu>\d+)$'
m = p8_1.match(line)
if m:
mtu = m.groupdict()['mtu']
interface_dict[interface]['mtu'] = int(mtu)
continue
# ^Internet *Address *is *(?P<ipv4>(?P<ip>[0-9\.x]+)\/(?P<prefix_length>[0-9]+)) (?P<type>\w+)$
m = p9.match(line)
if m:
ip_sec = m.groupdict()['ip']
prefix_length_sec = m.groupdict()['prefix_length']
address_sec = m.groupdict()['ipv4']
address_type = m.groupdict()['type']
if 'ipv4' not in interface_dict[interface]:
interface_dict[interface]['ipv4'] = {}
if address_sec not in interface_dict[interface]['ipv4']:
interface_dict[interface]['ipv4'][address_sec] = {}
if address_type != 'Primary':
interface_dict[interface]['ipv4'][address_sec]['secondary'] = True
interface_dict[interface]['ipv4'][address_sec]['ip'] = ip_sec
interface_dict[interface]['ipv4'][address_sec]['prefix_length'] = prefix_length_sec
continue
# r'^PVID: *(?P<pvid>\d+)$'
m = p10.match(line)
if m:
pvid = m.groupdict()['pvid']
interface_dict[interface]['pvid'] = int(pvid)
continue
# r'^\s*Port link-type: (?P<switchport_mode>\w+)$'
m = p11.match(line)
if m:
switchport_mode = m.groupdict()['switchport_mode']
if 'switchport' not in interface_dict[interface]:
interface_dict[interface]['switchport'] = {}
interface_dict[interface]['switchport']['mode'] = switchport_mode
continue
# r'^Tagged +VLAN ID : (?P<tagged>\w+)$'
m = p12.match(line)
if m:
tagged = m.groupdict()['tagged']
if(tagged != 'none'):
interface_dict[interface]['switchport']['tagged'] = int(tagged)
continue
# r'^Untagged +VLAN ID : (?P<untagged>\w+)$'
m = p13.match(line)
if m:
untagged = m.groupdict()['untagged']
if(untagged != 'none'):
interface_dict[interface]['switchport']['untagged'] = int(untagged)
continue
# r'VLAN passing *: (?P<passing>(.*))'
# VLAN passing : 1(default vlan), 2-4, 9-11, 17-18, 20, 23, 28-36
m = p14.match(line)
if m:
passing = m.groupdict()['passing']
passing = passing.split(', ')
vlans = []
for vlan in passing:
if (re.search('default vlan', vlan)):
vlans.append(1)
elif (re.search('-', vlan)):
init = int(vlan.split('-')[0])
end = int(vlan.split('-')[1]) + 1
vlans.extend(list(range(init, end)))
else:
vlans.append(int(vlan))
interface_dict[interface]['switchport']['vlan_passing'] = vlans
continue
            # r'^ *VLAN permitted *: (?P<permitted>(.*))$'
m = p15.match(line)
if m:
permitted = m.groupdict()['permitted']
permitted = permitted.split(', ')
vlans = []
for vlan in permitted:
if (re.search('default vlan', vlan)):
vlans.append(1)
elif (re.search('-', vlan)):
init = int(vlan.split('-')[0])
end = int(vlan.split('-')[1]) + 1
vlans.extend(list(range(init, end)))
else:
vlans.append(int(vlan))
interface_dict[interface]['switchport']['vlan_permitted'] = vlans
continue
# r'^ *Trunk port encapsulation: (?P<encapsulation>.*)$'
m = p16.match(line)
if m:
encapsulation = m.groupdict()['encapsulation']
interface_dict[interface]['switchport']['encapsulation'] = encapsulation
continue
# r'^ *Port priority: (?P<priority>\d+)$'
m = p17.match(line)
if m:
priority = m.groupdict()['priority']
interface_dict[interface]['priority'] = int(priority)
continue
# r'^ *Last clearing of counters: *(?P<last_clear>.*)$'
m = p18.match(line)
if m:
last_clear = m.groupdict()['last_clear']
if 'counters' not in interface_dict[interface]:
interface_dict[interface]['counters'] = {}
interface_dict[interface]['counters']['last_clear'] = last_clear
continue
# r'Last (?P<load_interval>[0-9\#]+) *(?P<unit>(minute|second|minutes|seconds)) input: *(?P<in_rate_pkts>[0-9]+) packets\/sec *(?P<in_rate_bytes>[0-9]+) *bytes\/sec *.*%$'
m = p19.match(line)
if m:
load_interval = m.groupdict()['load_interval']
in_rate_pkts = m.groupdict()['in_rate_pkts']
in_rate_bytes = m.groupdict()['in_rate_bytes']
if 'counters' not in interface_dict[interface]:
interface_dict[interface]['counters'] = {}
if 'rate' not in interface_dict[interface]['counters']:
interface_dict[interface]['counters']['rate'] = {}
interface_dict[interface]['counters']['rate']['load_interval'] = int(load_interval)
interface_dict[interface]['counters']['rate']['in_rate_pkts'] = int(in_rate_pkts)
interface_dict[interface]['counters']['rate']['in_rate_bytes'] = int(in_rate_bytes)
continue
# r'Last (?P<load_interval>[0-9\#]+) *(?P<unit>(minute|second|minutes|seconds)) output: *(?P<out_rate_pkts>[0-9]+) packets\/sec *(?P<out_rate_bytes>[0-9]+) *bytes\/sec *.*%$'
m = p20.match(line)
if m:
out_rate_pkts = m.groupdict()['out_rate_pkts']
out_rate_bytes = m.groupdict()['out_rate_bytes']
if 'counters' not in interface_dict[interface]:
interface_dict[interface]['counters'] = {}
if 'rate' not in interface_dict[interface]['counters']:
interface_dict[interface]['counters']['rate'] = {}
interface_dict[interface]['counters']['rate']['out_rate_pkts'] = int(out_rate_pkts)
interface_dict[interface]['counters']['rate']['out_rate_bytes'] = int(out_rate_bytes)
continue
# r'^ *Input \((?P<type>\w+)\): *(?P<packets>.*) packets, (?P<bytes>.*) bytes$'
m = p21_0.match(line)
if m:
inout = 'in'
inout_type = m.groupdict()['type']
packets = m.groupdict()['packets']
octets = m.groupdict()['bytes']
if(packets == '-'):
packets = 0
if(octets == '-'):
octets = 0
if 'counters' not in interface_dict[interface]:
interface_dict[interface]['counters'] = {}
if inout_type == 'total':
interface_dict[interface]['counters'][inout + "_pkts"] = int(packets)
interface_dict[interface]['counters'][inout + "_octets"] = int(octets)
elif inout_type == 'normal':
if 'normal' not in interface_dict[interface]['counters']:
interface_dict[interface]['counters']['normal'] = {}
interface_dict[interface]['counters']['normal'][inout + "_pkts"] = int(packets)
interface_dict[interface]['counters']['normal'][inout + "_octets"] = int(octets)
continue
# r'^ *Output \((?P<type>\w+)\): *(?P<packets>.*) packets, (?P<bytes>.*) bytes$'
m = p21_1.match(line)
if m:
inout = 'out'
inout_type = m.groupdict()['type']
packets = m.groupdict()['packets']
octets = m.groupdict()['bytes']
if(packets == '-'):
packets = 0
if(octets == '-'):
octets = 0
if 'counters' not in interface_dict[interface]:
interface_dict[interface]['counters'] = {}
if inout_type == 'total':
interface_dict[interface]['counters'][inout + "_pkts"] = int(packets)
interface_dict[interface]['counters'][inout + "_octets"] = int(octets)
elif inout_type == 'normal':
if 'normal' not in interface_dict[interface]['counters']:
interface_dict[interface]['counters']['normal'] = {}
interface_dict[interface]['counters']['normal'][inout + "_pkts"] = int(packets)
interface_dict[interface]['counters']['normal'][inout + "_octets"] = int(octets)
continue
# r'^ *(?P<unicasts>.*) unicasts, (?P<broadcasts>.*) broadcasts, (?P<multicasts>.*) multicasts, (?P<pauses>.*) pauses$'
m = p22.match(line)
if m:
unicasts = m.groupdict()['unicasts']
broadcasts = m.groupdict()['broadcasts']
multicasts = m.groupdict()['multicasts']
pauses = m.groupdict()['pauses']
if(unicasts == '-'):
unicasts = 0
if(broadcasts == '-'):
broadcasts = 0
if(multicasts == '-'):
multicasts = 0
if(pauses == '-'):
pauses = 0
if inout_type == 'total':
interface_dict[interface]['counters'][inout + "_unicast_pkts"] = int(unicasts)
interface_dict[interface]['counters'][inout + "_broadcast_pkts"] = int(broadcasts)
interface_dict[interface]['counters'][inout + "_multicast_pkts"] = int(multicasts)
interface_dict[interface]['counters'][inout + "_mac_pause_frames"] = int(pauses)
elif inout_type == 'normal':
interface_dict[interface]['counters']['normal'][inout + "_unicast_pkts"] = int(unicasts)
interface_dict[interface]['counters']['normal'][inout + "_broadcast_pkts"] = int(broadcasts)
interface_dict[interface]['counters']['normal'][inout + "_multicast_pkts"] = int(multicasts)
interface_dict[interface]['counters']['normal'][inout + "_mac_pause_frames"] = int(pauses)
continue
# r'^ *Input: *(?P<in_errors>.*) input errors, (?P<in_runts>.*) runts, (?P<in_giants>.*) giants, (?P<in_throttles>.*) throttles$'
m = p23.match(line)
if m:
in_errors = m.groupdict()['in_errors']
in_runts = m.groupdict()['in_runts']
in_giants = m.groupdict()['in_giants']
in_throttles = m.groupdict()['in_throttles']
if(in_errors == '-'):
in_errors = 0
if(in_runts == '-'):
in_runts = 0
if(in_giants == '-'):
in_giants = 0
if(in_throttles == '-'):
in_throttles = 0
interface_dict[interface]['counters']["in_errors"] = int(in_errors)
interface_dict[interface]['counters']["in_runts"] = int(in_runts)
interface_dict[interface]['counters']["in_giants"] = int(in_giants)
interface_dict[interface]['counters']["in_throttles"] = int(in_throttles)
continue
# r'^ *(?P<in_crc_errors>.*) CRC, (?P<in_frame>.*) frame, (?P<in_overrun>.*) overruns, (?P<in_abort>.*) aborts$'
m = p24.match(line)
if m:
in_crc_errors = m.groupdict()['in_crc_errors']
in_frame = m.groupdict()['in_frame']
in_overrun = m.groupdict()['in_overrun']
in_abort = m.groupdict()['in_abort']
if(in_crc_errors == '-'):
in_crc_errors = 0
if(in_frame == '-'):
in_frame = 0
if(in_overrun == '-'):
in_overrun = 0
if(in_abort == '-'):
in_abort = 0
interface_dict[interface]['counters']["in_crc_errors"] = int(in_crc_errors)
interface_dict[interface]['counters']["in_frame"] = int(in_frame)
interface_dict[interface]['counters']["in_overrun"] = int(in_overrun)
interface_dict[interface]['counters']["in_abort"] = int(in_abort)
continue
# r'^ *(?P<in_ignored>.*) ignored, (?P<in_parity_errors>.*) parity errors$'
m = p25.match(line)
if m:
in_ignored = m.groupdict()['in_ignored']
in_parity_errors = m.groupdict()['in_parity_errors']
if(in_ignored == '-'):
in_ignored = 0
if(in_parity_errors == '-'):
in_parity_errors = 0
interface_dict[interface]['counters']["in_ignored"] = int(in_ignored)
interface_dict[interface]['counters']["in_parity_errors"] = int(in_parity_errors)
continue
# r'^ *Output: *(?P<out_errors>.*) output errors, (?P<out_underruns>.*) underruns, (?P<out_buffer_failure>.*) buffer failures$'
m = p26.match(line)
if m:
out_errors = m.groupdict()['out_errors']
out_underruns = m.groupdict()['out_underruns']
out_buffer_failure = m.groupdict()['out_buffer_failure']
if(out_errors == '-'):
out_errors = 0
if(out_underruns == '-'):
out_underruns = 0
if(out_buffer_failure == '-'):
out_buffer_failure = 0
interface_dict[interface]['counters']["out_errors"] = int(out_errors)
interface_dict[interface]['counters']["out_underruns"] = int(out_underruns)
interface_dict[interface]['counters']["out_buffer_failure"] = int(out_buffer_failure)
continue
# ^ *(?P<out_abort>.*) aborts, (?P<out_deferred>.*) deferred, (?P<out_collision>.*) collisions, (?P<out_late_collision>.*) late collisions$
m = p27.match(line)
if m:
out_abort = m.groupdict()['out_abort']
out_deferred = m.groupdict()['out_deferred']
out_collision = m.groupdict()['out_collision']
out_late_collision = m.groupdict()['out_late_collision']
if(out_abort == '-'):
out_abort = 0
if(out_deferred == '-'):
out_deferred = 0
if(out_collision == '-'):
out_collision = 0
if(out_late_collision == '-'):
out_late_collision = 0
interface_dict[interface]['counters']["out_abort"] = int(out_abort)
interface_dict[interface]['counters']["out_deferred"] = int(out_deferred)
interface_dict[interface]['counters']["out_collision"] = int(out_collision)
interface_dict[interface]['counters']["out_late_collision"] = int(out_late_collision)
continue
# '^ *(?P<out_lost_carrier>.*) lost carrier, (?P<out_no_carrier>.*) no carrier$'
m = p28.match(line)
if m:
out_lost_carrier = m.groupdict()['out_lost_carrier']
out_no_carrier = m.groupdict()['out_no_carrier']
if(out_lost_carrier == '-'):
out_lost_carrier = 0
if(out_no_carrier == '-'):
out_no_carrier = 0
interface_dict[interface]['counters']["out_lost_carrier"] = int(out_lost_carrier)
interface_dict[interface]['counters']["out_no_carrier"] = int(out_no_carrier)
continue
return interface_dict
class DisplayIpInterfaceBriefSchema(MetaParser):
"""Parser for display ip interface brief"""
schema = {
Optional('route'):
{
Any():
{
Optional('ip_address'): str,
Optional('link'): str,
Optional('protocol'): str,
Optional('description'): str
}
},
Optional('bridge'):
{
Any():
{
Optional('link'): str,
Optional('speed'): str,
Optional('duplex'): str,
Optional('type'): str,
Optional('pvid'): str,
Optional('description'): str
}
}
}
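# Illustrative only (added example, not from the original module): a hypothetical
# parsed result shaped to match DisplayIpInterfaceBriefSchema above. Interface
# names and field values are made up purely for demonstration.
_EXAMPLE_BRIEF_PARSED = {
    'route': {
        'Vlan-interface1': {
            'ip_address': '10.0.0.1',
            'link': 'UP',
            'protocol': 'UP',
            'description': 'mgmt',
        },
    },
    'bridge': {
        'GigabitEthernet1/0/1': {
            'link': 'UP',
            'speed': '1G',
            'duplex': 'F',
            'type': 'A',
            'pvid': '1',
            'description': 'uplink',
        },
    },
}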
class DisplayInterfacesBrief(DisplayIpInterfaceBriefSchema):
"""parser for display interface brief
"""
##########################################################
    # Other Platforms equivalent command
##########################################################
platform_equivalent_cli = ['show interface brief']
##########################################################
# HP Comware command
##########################################################
platform_cli = ['display interface brief']
cli_command = platform_equivalent_cli + platform_cli
def cli(self, output=None):
route_dict = {}
bridge_dict = {}
parsed_dict = {}
if output is None:
cmd = self.platform_cli[0]
out = self.device.execute(cmd)
else:
out = output
route_out = ''
bridge_out = ''
table = ''
if out:
for line in out.splitlines():
if re.search('under route', line):
table = 'route'
if re.search('under bridge', line):
table = 'bridge'
if table == 'route':
route_out = route_out + line + "\n"
elif table == 'bridge':
bridge_out = bridge_out + line + "\n"
if (route_out != ''):
res = parsergen.oper_fill_tabular(device_output=route_out,
device_os='hp_comware',
table_terminal_pattern=r"^\n",
header_fields=['Interface',
'Link',
'Protocol',
'Main IP',
'Description'],
label_fields=['Interface',
'link',
'protocol',
'ip_address',
'description'],
index=[0])
if res.entries:
for intf, intf_dict in res.entries.items():
intf = Common.convert_intf_name(intf)
del intf_dict['Interface']
route_dict.setdefault('route', {}).update({intf: intf_dict})
if (bridge_out != ''):
res = parsergen.oper_fill_tabular(device_output=bridge_out,
device_os='hp_comware',
table_terminal_pattern=r"^\n",
header_fields=['Interface',
'Link',
'Speed',
'Duplex',
'Type',
'PVID',
'Description'],
label_fields=['Interface',
'link',
'speed',
'duplex',
'type',
'pvid',
'description'],
index=[0])
if res.entries:
for intf, intf_dict in res.entries.items():
intf = Common.convert_intf_name(intf)
del intf_dict['Interface']
bridge_dict.setdefault('bridge', {}).update({intf: intf_dict})
if(route_dict != {}):
parsed_dict.update(route_dict)
if(bridge_dict != {}):
parsed_dict.update(bridge_dict)
return parsed_dict
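# Minimal usage sketch (illustrative; not part of the original parser module).
# Passing captured CLI text via output= makes cli() skip device.execute(), so a
# live device is not required. The Mock device and the placeholder output string
# below are assumptions made purely for demonstration.
if __name__ == '__main__':
    from unittest.mock import Mock
    captured = '<raw output of "display interface brief" captured elsewhere>'
    parser = DisplayInterfacesBrief(device=Mock())
    print(parser.cli(output=captured))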
|
a362ed6562241f8348eb7f9ca5fcf3d36031ae54
|
c675ff5fcd3b13fa39352bb8cac11d75262659a8
|
/tests/test_observable/test_groupby.py
|
767ef729eb6b6f04d761af1ef15d8226ec3add5c
|
[
"MIT"
] |
permissive
|
ReactiveX/RxPY
|
469eb714996c205989e99899a6f1ab1ae2f42dd0
|
af1663d35810fdcd4c25a3ed2e8f0d71b55c341d
|
refs/heads/master
| 2023-08-14T19:27:40.086304
| 2023-01-08T10:02:08
| 2023-03-04T15:33:19
| 8,946,089
| 4,764
| 467
|
MIT
| 2023-09-05T02:53:16
| 2013-03-22T06:16:54
|
Python
|
UTF-8
|
Python
| false
| false
| 21,576
|
py
|
test_groupby.py
|
import unittest
import reactivex
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
class TestGroupBy(unittest.TestCase):
def test_group_by_with_key_comparer(self):
scheduler = TestScheduler()
key_invoked = [0]
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_completed(570),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
def factory():
def key_mapper(x):
key_invoked[0] += 1
return x.lower().strip()
return xs.pipe(
ops.group_by(key_mapper, lambda x: x),
ops.map(lambda g: g.key),
)
results = scheduler.start(factory)
assert results.messages == [
on_next(220, "foo"),
on_next(270, "bar"),
on_next(350, "baz"),
on_next(360, "qux"),
on_completed(570),
]
assert xs.subscriptions == [subscribe(200, 570)]
assert key_invoked[0] == 12
def test_groupby_outer_complete(self):
scheduler = TestScheduler()
key_invoked = [0]
ele_invoked = [0]
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_completed(570),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
def factory():
def key_mapper(x):
key_invoked[0] += 1
return x.lower().strip()
def element_mapper(x):
ele_invoked[0] += 1
return x[::-1] # Yes, this is reverse string in Python
return xs.pipe(
ops.group_by(key_mapper, element_mapper),
ops.map(lambda g: g.key),
)
results = scheduler.start(factory)
assert results.messages == [
on_next(220, "foo"),
on_next(270, "bar"),
on_next(350, "baz"),
on_next(360, "qux"),
on_completed(570),
]
assert xs.subscriptions == [subscribe(200, 570)]
assert key_invoked[0] == 12
assert ele_invoked[0] == 12
def test_group_by_outer_error(self):
scheduler = TestScheduler()
key_invoked = [0]
ele_invoked = [0]
ex = "ex"
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_error(570, ex),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
def factory():
def key_mapper(x):
key_invoked[0] += 1
return x.lower().strip()
def element_mapper(x):
ele_invoked[0] += 1
return x[::-1]
return xs.pipe(
ops.group_by(key_mapper, element_mapper),
ops.map(lambda g: g.key),
)
results = scheduler.start(factory)
assert results.messages == [
on_next(220, "foo"),
on_next(270, "bar"),
on_next(350, "baz"),
on_next(360, "qux"),
on_error(570, ex),
]
assert xs.subscriptions == [subscribe(200, 570)]
assert key_invoked[0] == 12
assert ele_invoked[0] == 12
def test_group_by_outer_dispose(self):
scheduler = TestScheduler()
key_invoked = [0]
ele_invoked = [0]
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_completed(570),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
def factory():
def key_mapper(x):
key_invoked[0] += 1
return x.lower().strip()
def element_mapper(x):
ele_invoked[0] += 1
return x[::-1]
return xs.pipe(
ops.group_by(key_mapper, element_mapper),
ops.map(lambda g: g.key),
)
results = scheduler.start(factory, disposed=355)
assert results.messages == [
on_next(220, "foo"),
on_next(270, "bar"),
on_next(350, "baz"),
]
assert xs.subscriptions == [subscribe(200, 355)]
assert key_invoked[0] == 5
assert ele_invoked[0] == 5
def test_group_by_outer_key_on_error(self):
scheduler = TestScheduler()
key_invoked = [0]
ele_invoked = [0]
ex = "ex"
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_completed(570),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
def factory():
def key_mapper(x):
key_invoked[0] += 1
if key_invoked[0] == 10:
raise Exception(ex)
return x.lower().strip()
def element_mapper(x):
ele_invoked[0] += 1
return x[::-1]
return xs.pipe(
ops.group_by(key_mapper, element_mapper),
ops.map(lambda g: g.key),
)
results = scheduler.start(factory)
assert results.messages == [
on_next(220, "foo"),
on_next(270, "bar"),
on_next(350, "baz"),
on_next(360, "qux"),
on_error(480, ex),
]
assert xs.subscriptions == [subscribe(200, 480)]
assert key_invoked[0] == 10
assert ele_invoked[0] == 9
def test_group_by_outer_ele_on_error(self):
scheduler = TestScheduler()
key_invoked = [0]
ele_invoked = [0]
ex = "ex"
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_completed(570),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
def factory():
def key_mapper(x):
key_invoked[0] += 1
return x.lower().strip()
def element_mapper(x):
ele_invoked[0] += 1
if ele_invoked[0] == 10:
raise Exception(ex)
return x[::-1]
return xs.pipe(
ops.group_by(key_mapper, element_mapper),
ops.map(lambda g: g.key),
)
results = scheduler.start(factory)
assert results.messages == [
on_next(220, "foo"),
on_next(270, "bar"),
on_next(350, "baz"),
on_next(360, "qux"),
on_error(480, ex),
]
assert xs.subscriptions == [subscribe(200, 480)]
assert key_invoked[0] == 10
assert ele_invoked[0] == 10
def test_group_by_inner_complete(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_completed(570),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
c = {
"outer_subscription": None,
"inner_subscriptions": {},
"inners": {},
"results": {},
"outer": None,
}
def action1(scheduler, state):
c["outer"] = xs.pipe(
ops.group_by(lambda x: x.lower().strip(), lambda x: x[::-1]),
)
scheduler.schedule_absolute(created, action1)
def action2(scheduler, state):
def next(group):
result = scheduler.create_observer()
c["inners"][group.key] = group
c["results"][group.key] = result
def action21(scheduler, state):
c["inner_subscriptions"][group.key] = group.subscribe(
result, scheduler
)
scheduler.schedule_relative(100, action21)
c["outer_subscription"] = c["outer"].subscribe(next, scheduler=scheduler)
scheduler.schedule_absolute(subscribed, action2)
def action3(scheduler, state):
c["outer_subscription"].dispose()
for sub in c["inner_subscriptions"].values():
sub.dispose()
scheduler.schedule_absolute(disposed, action3)
scheduler.start()
assert len(c["inners"]) == 4
assert c["results"]["foo"].messages == [
on_next(470, " OOF"),
on_next(530, " oOf "),
on_completed(570),
]
assert c["results"]["bar"].messages == [
on_next(390, "rab "),
on_next(420, " RAB "),
on_completed(570),
]
assert c["results"]["baz"].messages == [
on_next(480, " zab"),
on_next(510, " ZAb "),
on_completed(570),
]
assert c["results"]["qux"].messages == [on_completed(570)]
assert xs.subscriptions == [subscribe(200, 570)]
def test_group_by_inner_complete_all(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_completed(570),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
inners = {}
inner_subscriptions = {}
results = {}
c = {"outer": None, "outer_subscription": None, "result": None}
def action1(scheduler, state):
c["outer"] = xs.pipe(
ops.group_by(
lambda x: x.lower().strip(),
lambda x: x[::-1],
)
)
return c["outer"]
scheduler.schedule_absolute(created, action1)
def action2(scheduler, state):
def on_next(group):
c["result"] = scheduler.create_observer()
inners[group.key] = group
results[group.key] = c["result"]
inner_subscriptions[group.key] = group.subscribe(c["result"], scheduler)
c["outer_subscription"] = c["outer"].subscribe(on_next, scheduler=scheduler)
return c["outer_subscription"]
scheduler.schedule_absolute(subscribed, action2)
def action3(scheduler, state):
c["outer_subscription"].dispose()
for sub in inner_subscriptions.values():
sub.dispose()
scheduler.schedule_absolute(disposed, action3)
scheduler.start()
assert len(inners) == 4
assert results["foo"].messages == [
on_next(220, "oof "),
on_next(240, " OoF "),
on_next(310, " Oof"),
on_next(470, " OOF"),
on_next(530, " oOf "),
on_completed(570),
]
assert results["bar"].messages == [
on_next(270, " Rab"),
on_next(390, "rab "),
on_next(420, " RAB "),
on_completed(570),
]
assert results["baz"].messages == [
on_next(350, " zaB "),
on_next(480, " zab"),
on_next(510, " ZAb "),
on_completed(570),
]
assert results["qux"].messages == [on_next(360, " xuq "), on_completed(570)]
assert xs.subscriptions == [subscribe(200, 570)]
def test_group_by_inner_error(self):
ex = "ex1"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(90, "error"),
on_next(110, "error"),
on_next(130, "error"),
on_next(220, " foo"),
on_next(240, " FoO "),
on_next(270, "baR "),
on_next(310, "foO "),
on_next(350, " Baz "),
on_next(360, " qux "),
on_next(390, " bar"),
on_next(420, " BAR "),
on_next(470, "FOO "),
on_next(480, "baz "),
on_next(510, " bAZ "),
on_next(530, " fOo "),
on_error(570, ex),
on_next(580, "error"),
on_completed(600),
on_error(650, "ex"),
)
inner_subscriptions = {}
inners = {}
results = {}
c = {"outer_subscription": None, "outer": None}
def action1(scheduler, state):
c["outer"] = xs.pipe(
ops.group_by(
lambda x: x.lower().strip(),
lambda x: x[::-1],
)
)
return c["outer"]
scheduler.schedule_absolute(created, action1)
def action2(scheduler, state):
def on_next(group):
result = scheduler.create_observer()
inners[group.key] = group
results[group.key] = result
def action3(scheduler, state):
inner_subscriptions[group.key] = group.subscribe(result, scheduler)
scheduler.schedule_relative(100, action3)
c["outer_subscription"] = c["outer"].subscribe(
on_next, lambda e: None, scheduler=scheduler
)
return c["outer_subscription"]
scheduler.schedule_absolute(subscribed, action2)
def action4(scheduler, state):
c["outer_subscription"].dispose()
for sub in inner_subscriptions.values():
sub.dispose()
scheduler.schedule_absolute(disposed, action4)
scheduler.start()
assert len(inners) == 4
assert results["foo"].messages == [
on_next(470, " OOF"),
on_next(530, " oOf "),
on_error(570, ex),
]
assert results["bar"].messages == [
on_next(390, "rab "),
on_next(420, " RAB "),
on_error(570, ex),
]
assert results["baz"].messages == [
on_next(480, " zab"),
on_next(510, " ZAb "),
on_error(570, ex),
]
assert results["qux"].messages == [on_error(570, ex)]
assert xs.subscriptions == [subscribe(200, 570)]
def test_group_by_with_merge(self):
scheduler = TestScheduler()
xs = [None]
results = [None]
def action1(scheduler, state):
xs[0] = reactivex.from_iterable(
["alpha", "apple", "beta", "bat", "gamma"]
).pipe(
ops.group_by(lambda s: s[0]),
ops.map(lambda xs: xs.pipe(ops.to_iterable(), ops.map(list))),
ops.merge_all(),
)
scheduler.schedule_absolute(created, action1)
def action2(scheduler, state):
results[0] = scheduler.create_observer()
xs[0].subscribe(results[0], scheduler)
scheduler.schedule_absolute(subscribed, action2)
scheduler.start()
assert results[0].messages == [
on_next(200, ["alpha", "apple"]),
on_next(200, ["beta", "bat"]),
on_next(200, ["gamma"]),
on_completed(200),
]
def test_group_by_with_ReplaySubject(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(300, 1),
on_next(310, 2),
on_next(320, 3),
on_next(320, 4),
on_next(320, 5),
on_next(320, 6),
on_completed(1000),
)
observer_groups = scheduler.create_observer()
observer_odd = scheduler.create_observer()
observer_even = scheduler.create_observer()
def subscription(scheduler, state):
source = xs.pipe(
ops.group_by(
key_mapper=lambda x: x % 2,
element_mapper=None,
subject_mapper=lambda: reactivex.subject.ReplaySubject(2),
)
)
return source.subscribe(observer_groups, scheduler=scheduler)
scheduler.schedule_absolute(290, subscription)
scheduler.advance_to(500)
# extract grouped observables from messages list
groups = {
m.value.value.key: m.value.value
for m in observer_groups.messages
if m.value.kind == "N"
}
def subscription_odd(scheduler, state):
source = groups[1]
return source.subscribe(observer_odd, scheduler=scheduler)
def subscription_even(scheduler, state):
source = groups[0]
return source.subscribe(observer_even, scheduler=scheduler)
scheduler.schedule_absolute(500, subscription_odd)
scheduler.schedule_absolute(600, subscription_even)
scheduler.advance_to(1100)
# only the last 2 items of odd/even are received because the
# ReplaySubject has been configured with a buffer size of 2
assert observer_odd.messages == [
on_next(500, 3),
on_next(500, 5),
on_completed(1000),
]
assert observer_even.messages == [
on_next(600, 4),
on_next(600, 6),
on_completed(1000),
]
if __name__ == "__main__":
unittest.main()
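# Illustrative sketch (added; not part of the original test suite): the same
# group_by + map(g.key) pipeline exercised above, run against a plain iterable
# instead of the TestScheduler. One value is emitted per distinct key, so this
# would print "a" and then "b".
#
#   reactivex.from_iterable(["alpha", "apple", "beta"]).pipe(
#       ops.group_by(lambda s: s[0]),
#       ops.map(lambda g: g.key),
#   ).subscribe(print)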
|
3295c604ec4ff7e4996651b2c438fd2f6d209292
|
cdfb7effe200e064578ce4670c3f9cdf5881cba1
|
/preprocess.py
|
8fc840805747c563c48e2f01587d1e8476225fa1
|
[] |
no_license
|
aub-mind/arabert
|
964c102b2fd887effc9d0ab018b38637d717e353
|
6fcebaebc97844d4b498900daa6314257f22c042
|
refs/heads/master
| 2022-10-29T21:37:30.202662
| 2022-10-17T00:26:39
| 2022-10-17T00:26:39
| 242,163,521
| 531
| 134
| null | 2022-10-17T00:26:40
| 2020-02-21T14:54:35
|
Python
|
UTF-8
|
Python
| false
| false
| 28,826
|
py
|
preprocess.py
|
import html
import logging
import re
from typing import List
import pyarabic.araby as araby
ACCEPTED_MODELS = [
"bert-base-arabertv01",
"bert-base-arabert",
"bert-base-arabertv02",
"bert-base-arabertv2",
"bert-base-arabertv02-twitter",
"bert-large-arabertv02",
"bert-large-arabertv2",
"bert-large-arabertv02-twitter",
"araelectra-base",
"araelectra-base-discriminator",
"araelectra-base-generator",
"araelectra-base-artydiqa",
"aragpt2-base",
"aragpt2-medium",
"aragpt2-large",
"aragpt2-mega",
]
SEGMENTED_MODELS = [
"bert-base-arabert",
"bert-base-arabertv2",
"bert-large-arabertv2",
]
SECOND_GEN_MODELS = [
"bert-base-arabertv02",
"bert-base-arabertv2",
"bert-base-arabertv02-twitter",
"bert-large-arabertv02",
"bert-large-arabertv2",
"bert-large-arabertv02-twitter",
"araelectra-base",
"araelectra-base-discriminator",
"araelectra-base-generator",
"araelectra-base-artydiqa",
"aragpt2-base",
"aragpt2-medium",
"aragpt2-large",
"aragpt2-mega",
]
TWEET_MODELS = [
"bert-base-arabertv02-twitter",
"bert-large-arabertv02-twitter",
]
PREFIX_LIST = [
"ال",
"و",
"ف",
"ب",
"ك",
"ل",
"لل",
"\u0627\u0644",
"\u0648",
"\u0641",
"\u0628",
"\u0643",
"\u0644",
"\u0644\u0644",
"س",
]
SUFFIX_LIST = [
"ه",
"ها",
"ك",
"ي",
"هما",
"كما",
"نا",
"كم",
"هم",
"هن",
"كن",
"ا",
"ان",
"ين",
"ون",
"وا",
"ات",
"ت",
"ن",
"ة",
"\u0647",
"\u0647\u0627",
"\u0643",
"\u064a",
"\u0647\u0645\u0627",
"\u0643\u0645\u0627",
"\u0646\u0627",
"\u0643\u0645",
"\u0647\u0645",
"\u0647\u0646",
"\u0643\u0646",
"\u0627",
"\u0627\u0646",
"\u064a\u0646",
"\u0648\u0646",
"\u0648\u0627",
"\u0627\u062a",
"\u062a",
"\u0646",
"\u0629",
]
# the never_split list is used with the transformers library
_PREFIX_SYMBOLS = [x + "+" for x in PREFIX_LIST]
_SUFFIX_SYMBOLS = ["+" + x for x in SUFFIX_LIST]
_OTHER_TOKENS = ["[رابط]", "[مستخدم]", "[بريد]"]
NEVER_SPLIT_TOKENS = list(set(_PREFIX_SYMBOLS + _SUFFIX_SYMBOLS + _OTHER_TOKENS))
URL_REGEXES = [
r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)",
r"@(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS",
r"http[s]?://[a-zA-Z0-9_\-./~\?=%&]+",
r"www[a-zA-Z0-9_\-?=%&/.~]+",
r"[a-zA-Z]+\.com",
r"(?=http)[^\s]+",
r"(?=www)[^\s]+",
r"://",
]
USER_MENTION_REGEX = r"@[\w\d]+"
EMAIL_REGEXES = [r"[\w-]+@([\w-]+\.)+[\w-]+", r"\S+@\S+"]
REDUNDANT_PUNCT_PATTERN = (
r"([!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ【»؛\s+«–…‘]{2,})"
)
REGEX_TATWEEL = r"(\D)\1{2,}"
MULTIPLE_CHAR_PATTERN = re.compile(r"(\D)\1{2,}", re.DOTALL)
REJECTED_CHARS_REGEX = r"[^0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘]"
REJECTED_CHARS_REGEXV2 = r"[^0-9\u0621-\u063A\u0641-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘/]"
REGEX_URL_STEP1 = r"(?=http)[^\s]+"
REGEX_URL_STEP2 = r"(?=www)[^\s]+"
REGEX_URL = r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
REGEX_MENTION = r"@[\w\d]+"
REGEX_EMAIL = r"\S+@\S+"
CHARS_REGEX = r"0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘"
CHARS_REGEXV2 = r"0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘/"
WHITE_SPACED_DOUBLE_QUOTATION_REGEX = r'\"\s+([^"]+)\s+\"'
WHITE_SPACED_SINGLE_QUOTATION_REGEX = r"\'\s+([^']+)\s+\'"
WHITE_SPACED_BACK_QUOTATION_REGEX = r"\`\s+([^`]+)\s+\`"
WHITE_SPACED_EM_DASH = r"\—\s+([^—]+)\s+\—"
LEFT_SPACED_CHARS = r" ([\]!#\$%\),\.:;\?}٪’،؟”؛…»·])"
RIGHT_SPACED_CHARS = r"([\[\(\{“«‘*\~]) "
LEFT_AND_RIGHT_SPACED_CHARS = r" ([\+\-\<\=\>\@\\\^\_\|\–]) "
_HINDI_NUMS = "٠١٢٣٤٥٦٧٨٩"
_ARABIC_NUMS = "0123456789"
HINDI_TO_ARABIC_MAP = str.maketrans(_HINDI_NUMS, _ARABIC_NUMS)
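# For illustration (added comment): "١٩٩٥".translate(HINDI_TO_ARABIC_MAP) -> "1995",
# i.e. each Hindi digit is mapped to its Arabic-numeral counterpart.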
class ArabertPreprocessor:
"""
A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.
    It can also un-process generated text output.
Args:
model_name (:obj:`str`): model name from the HuggingFace Models page without
the aubmindlab tag. Will default to a base Arabic preprocessor if model name was not found.
Current accepted models are:
- "bert-base-arabertv01"
- "bert-base-arabert"
- "bert-base-arabertv02"
- "bert-base-arabertv2"
- "bert-base-arabertv02-twitter"
- "bert-large-arabertv02"
- "bert-large-arabertv2"
- "bert-large-arabertv02-twitter"
- "araelectra-base"
- "araelectra-base-discriminator"
- "araelectra-base-generator"
- "araelectra-base-artydiqa"
- "aragpt2-base"
- "aragpt2-medium"
- "aragpt2-large"
- "aragpt2-mega"
        remove_html_markup(:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to remove html artifacts,
should be set to False when preprocessing TyDi QA.
replace_urls_emails_mentions(:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to replace email urls
and mentions by special tokens.
strip_tashkeel(:obj:`bool`, `optional`, defaults to :obj:`True`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA,
KASRA, SUKUN, SHADDA).
strip_tatweel(:obj:`bool`, `optional`, defaults to :obj:`True`): remove tatweel '\\u0640'.
insert_white_spaces(:obj:`bool`, `optional`, defaults to :obj:`True`): insert whitespace before and after all non Arabic digits
or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace
between words and numbers or numbers and words.
remove_non_digit_repetition(:obj:`bool`, `optional`, defaults to :obj:`True`): replace repetition of more than 2 non-digit character with
2 of this character.
replace_slash_with_dash(:obj:`bool`, `optional`, defaults to :obj:`None`): Will be automatically set to True in AraBERTv02,
AraELECTRA and AraGPT2.
Set to False to force disable, and True to force enable. Replaces the "/" with "-",
since "/" is missing from AraBERTv2, AraELECTRA and ARAGPT2 vocabulary.
map_hindi_numbers_to_arabic(:obj:`bool`, `optional`, defaults to :obj:`None`): Will be automatically set to True in
            AraBERTv02, AraELECTRA and AraGPT2. Set to False to force disable, and True to force enable.
Replaces hindi numbers with the corresponding Arabic one. ex: "١٩٩٥" --> "1995".
            This behavior is present by default in AraBERTv1 and v2 (with pre-segmentation),
            and fixes an issue caused by a bug when inserting white spaces.
apply_farasa_segmentation(:obj:`bool`, `optional`, defaults to :obj:`None`): Will be automatically set to True in
AraBERTv2, and AraBERTv1. Set to False to force disable, and True to force enable.
keep_emojis(:obj:`bool`, `optional`, defaults to :obj:`None`): don't remove emojis while preprocessing.
Will be automatically set to True in AraBERT trained on tweets.
Returns:
ArabertPreprocessor: A preprocessor instance
Example:
from preprocess import ArabertPreprocessor
arabert_prep = ArabertPreprocessor("aubmindlab/bert-base-arabertv2")
arabert_prep.preprocess("SOME ARABIC TEXT")
"""
def __init__(
self,
model_name: str,
remove_html_markup: bool = True,
replace_urls_emails_mentions: bool = True,
strip_tashkeel: bool = True,
strip_tatweel: bool = True,
insert_white_spaces: bool = True,
remove_non_digit_repetition: bool = True,
keep_emojis: bool = None,
replace_slash_with_dash: bool = None,
map_hindi_numbers_to_arabic: bool = None,
apply_farasa_segmentation: bool = None,
):
model_name = model_name.replace("aubmindlab/", "").replace("wissamantoun/", "")
if model_name not in ACCEPTED_MODELS:
logging.warning(
"""Model provided is not in the accepted model list. Preprocessor will default to a base Arabic preprocessor"""
)
self.model_name = "bert-base-arabertv02"
else:
self.model_name = model_name
if apply_farasa_segmentation is None:
if self.model_name in SEGMENTED_MODELS:
self.apply_farasa_segmentation = True
else:
self.apply_farasa_segmentation = False
else:
if (
apply_farasa_segmentation == False
and self.model_name in SEGMENTED_MODELS
):
logging.warning(
"The selected model_name requires Farasa pre-segmentation, but apply_farasa_segmentation was set to False!"
)
self.apply_farasa_segmentation = apply_farasa_segmentation
if self.apply_farasa_segmentation:
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except ModuleNotFoundError:
                logging.error(
                    "farasapy is not installed, you won't be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy"
                )
if keep_emojis is None:
if self.model_name in TWEET_MODELS:
self.keep_emojis = True
else:
self.keep_emojis = False
else:
if keep_emojis == False and self.model_name in TWEET_MODELS:
logging.warning(
"The selected model_name is trained on emojis, but keep_emojis was set to False!"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.apply_farasa_segmentation:
logging.warning(
"Keeping tweets with Farasa Segmentation is 10 times slower"
)
emoji_regex = "".join(list(self.emoji.UNICODE_EMOJI["en"].keys()))
self.REJECTED_CHARS_REGEX = "[^%s%s]" % (
CHARS_REGEX if self.model_name in SECOND_GEN_MODELS else CHARS_REGEXV2,
emoji_regex,
)
else:
self.REJECTED_CHARS_REGEX = (
REJECTED_CHARS_REGEX
if self.model_name in SECOND_GEN_MODELS
else REJECTED_CHARS_REGEXV2
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_non_digit_repetition = remove_non_digit_repetition
if replace_slash_with_dash is None:
if self.model_name in SECOND_GEN_MODELS:
self.replace_slash_with_dash = True
else:
self.replace_slash_with_dash = False
else:
self.replace_slash_with_dash = replace_slash_with_dash
if map_hindi_numbers_to_arabic is None:
if self.model_name in SECOND_GEN_MODELS:
self.map_hindi_numbers_to_arabic = True
else:
self.map_hindi_numbers_to_arabic = False
else:
self.map_hindi_numbers_to_arabic = map_hindi_numbers_to_arabic
def preprocess(self, text: str) -> str:
"""
        Preprocess takes an input text line and applies the same preprocessing used in AraBERT
pretraining, or according to settings
Args:
            text (:obj:`str`): input text string
Returns:
string: A preprocessed string depending on which model was selected
"""
if (
self.model_name == "bert-base-arabert"
or self.model_name == "bert-base-arabertv01"
):
return self._preprocess_v1(
text,
do_farasa_tokenization=self.apply_farasa_segmentation,
)
if self.model_name in SECOND_GEN_MODELS:
return self._preprocess_v2(text)
return self._preprocess_v3(text)
    def unpreprocess(self, text: str, desegment: bool = True) -> str:
        """Re-formats the text to a classic format where punctuation, brackets, and parentheses are not separated by whitespace.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (:obj:`str`): input text to be un-preprocessed
            desegment (:obj:`bool`, optional): whether or not to remove Farasa pre-segmentation before un-preprocessing.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.apply_farasa_segmentation and desegment:
text = self.desegment(text)
# removes the spaces around quotation marks ex: i " ate " an apple --> i "ate" an apple
# https://stackoverflow.com/a/53436792/5381220
text = re.sub(WHITE_SPACED_DOUBLE_QUOTATION_REGEX, '"' + r"\1" + '"', text)
text = re.sub(WHITE_SPACED_SINGLE_QUOTATION_REGEX, "'" + r"\1" + "'", text)
text = re.sub(WHITE_SPACED_BACK_QUOTATION_REGEX, "\`" + r"\1" + "\`", text)
text = re.sub(WHITE_SPACED_EM_DASH, "\—" + r"\1" + "\—", text)
# during generation, sometimes the models don't put a space after the dot, this handles it
text = text.replace(".", " . ")
text = " ".join(text.split())
# handle decimals
text = re.sub(r"(\d+) \. (\d+)", r"\1.\2", text)
text = re.sub(r"(\d+) \, (\d+)", r"\1,\2", text)
text = re.sub(LEFT_AND_RIGHT_SPACED_CHARS, r"\1", text)
text = re.sub(LEFT_SPACED_CHARS, r"\1", text)
text = re.sub(RIGHT_SPACED_CHARS, r"\1", text)
return text
def desegment(self, text: str) -> str:
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
        and before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace("+ ", "+")
text = text.replace(" +", "+")
text = " ".join([self._desegmentword(word) for word in text.split(" ")])
return text
def _desegmentword(self, orig_word: str) -> str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace("ل+ال+", "لل")
if "ال+ال" not in orig_word:
word = word.replace("ل+ال", "لل")
word = word.replace("+", "")
word = word.replace("للل", "لل")
return word
def _preprocess_v3(self, text: str) -> str:
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
# replace all possible URLs
for reg in URL_REGEXES:
text = re.sub(reg, " [رابط] ", text)
            # Replace emails with [بريد]
for reg in EMAIL_REGEXES:
text = re.sub(reg, " [بريد] ", text)
# replace mentions with [مستخدم]
text = re.sub(USER_MENTION_REGEX, " [مستخدم] ", text)
if self.remove_html_markup:
# remove html line breaks
text = re.sub("<br />", " ", text)
# remove html markup
text = re.sub("</?[^>]+>", " ", text)
if self.map_hindi_numbers_to_arabic:
text = text.translate(HINDI_TO_ARABIC_MAP)
# remove repeated characters >2
if self.remove_non_digit_repetition:
text = self._remove_non_digit_repetition(text)
# insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
if self.insert_white_spaces:
text = re.sub(
"([^0-9\u0621-\u063A\u0641-\u064A\u0660-\u0669a-zA-Z ])",
r" \1 ",
text,
)
# re-fix brackets
text = text.replace("[ رابط ]", "[رابط]")
text = text.replace("[ بريد ]", "[بريد]")
text = text.replace("[ مستخدم ]", "[مستخدم]")
# insert whitespace between words and numbers or numbers and words
text = re.sub(
"(\d+)([\u0621-\u063A\u0641-\u064A\u066A-\u066C\u0654-\u0655]+)",
r" \1 \2 ",
text,
)
text = re.sub(
"([\u0621-\u063A\u0641-\u064A\u066A-\u066C\u0654-\u0655]+)(\d+)",
r" \1 \2 ",
text,
)
# remove unwanted characters
text = re.sub(self.REJECTED_CHARS_REGEX, " ", text)
# remove extra spaces
text = " ".join(text.replace("\uFE0F", "").split())
if self.apply_farasa_segmentation:
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = " ".join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
        # All the other models don't require Farasa segmentation
return text
def _preprocess_v2(self, text: str) -> str:
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
# replace all possible URLs
for reg in URL_REGEXES:
text = re.sub(reg, " [رابط] ", text)
            # Replace emails with [بريد]
for reg in EMAIL_REGEXES:
text = re.sub(reg, " [بريد] ", text)
# replace mentions with [مستخدم]
text = re.sub(USER_MENTION_REGEX, " [مستخدم] ", text)
if self.remove_html_markup:
# remove html line breaks
text = re.sub("<br />", " ", text)
# remove html markup
text = re.sub("</?[^>]+>", " ", text)
if self.map_hindi_numbers_to_arabic:
text = text.translate(HINDI_TO_ARABIC_MAP)
# remove repeated characters >2
if self.remove_non_digit_repetition:
text = self._remove_non_digit_repetition(text)
# insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
if self.insert_white_spaces:
text = re.sub(
"([^0-9\u0621-\u063A\u0641-\u064A\u0660-\u0669a-zA-Z\[\]])",
r" \1 ",
text,
)
# insert whitespace between words and numbers or numbers and words
text = re.sub(
"(\d+)([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)", r" \1 \2 ", text
)
text = re.sub(
"([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)(\d+)", r" \1 \2 ", text
)
if self.replace_slash_with_dash:
text = text.replace("/", "-")
# remove unwanted characters
text = re.sub(self.REJECTED_CHARS_REGEX, " ", text)
# remove extra spaces
text = " ".join(text.replace("\uFE0F", "").split())
if (
self.model_name == "bert-base-arabertv2"
or self.model_name == "bert-large-arabertv2"
):
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = " ".join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
        # All the other models don't require Farasa segmentation
return text
def _preprocess_v1(self, text: str, do_farasa_tokenization: bool) -> str:
"""
AraBERTv1 preprocessing Function
"""
text = str(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
text = re.sub(r"\d+\/[ء-ي]+\/\d+\]", "", text)
text = re.sub("ـ", "", text)
text = re.sub("[«»]", ' " ', text)
if self.replace_urls_emails_mentions:
            # URLs are replaced with the [رابط] token (substitute a space here instead if you want to drop links entirely)
text = re.sub(REGEX_URL_STEP1, "[رابط]", text)
text = re.sub(REGEX_URL_STEP2, "[رابط]", text)
text = re.sub(REGEX_URL, "[رابط]", text)
text = re.sub(REGEX_EMAIL, "[بريد]", text)
text = re.sub(REGEX_MENTION, "[مستخدم]", text)
text = re.sub("…", r"\.", text).strip()
text = self._remove_redundant_punct(text)
if self.replace_urls_emails_mentions:
text = re.sub(r"\[ رابط \]|\[ رابط\]|\[رابط \]", " [رابط] ", text)
text = re.sub(r"\[ بريد \]|\[ بريد\]|\[بريد \]", " [بريد] ", text)
text = re.sub(r"\[ مستخدم \]|\[ مستخدم\]|\[مستخدم \]", " [مستخدم] ", text)
if self.remove_non_digit_repetition:
text = self._remove_non_digit_repetition(text)
if self.insert_white_spaces:
text = re.sub(
"([^0-9\u0621-\u063A\u0641-\u0669\u0671-\u0673a-zA-Z\[\]])",
r" \1 ",
text,
)
if do_farasa_tokenization:
text = self._tokenize_arabic_words_farasa(text)
text = " ".join(text.split())
return text
def _farasa_segment(self, text: str) -> str:
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
if "+" not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _split_farasa_output(self, word: str) -> str:
segmented_word = []
temp_token = ""
for i, c in enumerate(word):
if c == "+":
# if the token is KAF, it could be a suffix or prefix
if temp_token == "ك":
# if we are at the second token, then KAF is surely a prefix
if i == 1:
segmented_word.append(temp_token + "+")
temp_token = ""
# If the KAF token is between 2 tokens
elif word[i - 2] == "+":
# if the previous token is prefix, then this KAF must be a prefix
if segmented_word[-1][-1] == "+":
segmented_word.append(temp_token + "+")
temp_token = ""
# else it is a suffix, this KAF could not be a second suffix
else:
segmented_word.append("+" + temp_token)
temp_token = ""
# if Kaf is at the end, this is handled with the statement after the loop
elif temp_token in PREFIX_LIST:
segmented_word.append(temp_token + "+")
temp_token = ""
elif temp_token in SUFFIX_LIST:
segmented_word.append("+" + temp_token)
temp_token = ""
else:
segmented_word.append(temp_token)
temp_token = ""
continue
temp_token += c
if temp_token != "":
if temp_token in SUFFIX_LIST:
segmented_word.append("+" + temp_token)
else:
segmented_word.append(temp_token)
return segmented_word
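    # Illustrative trace of the splitting logic above (added comment; the sample
    # Farasa output is hypothetical): _split_farasa_output("و+كتاب+ه") returns
    # ["و+", "كتاب", "+ه"], since "و" is in PREFIX_LIST and "ه" is in SUFFIX_LIST.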
def _tokenize_arabic_words_farasa(self, line_input: str) -> str:
if self.keep_emojis:
# insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
segmented_word = []
for token in word.split("+"):
if token in PREFIX_LIST:
segmented_word.append(token + "+")
elif token in SUFFIX_LIST:
segmented_word.append("+" + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _remove_non_digit_repetition(self, text: str) -> str:
"""
        :param text: the input text from which to remove elongation
        :return: text with the elongation removed
"""
# loop over the number of times the regex matched the text
# OLD
# for index_ in range(len(re.findall(REGEX_TATWEEL, text))):
# elongation = re.search(REGEX_TATWEEL, text)
# if elongation:
# elongation_pattern = elongation.group()
# elongation_replacement = elongation_pattern[0]
# elongation_pattern = re.escape(elongation_pattern)
# text = re.sub(
# elongation_pattern, elongation_replacement, text, flags=re.MULTILINE
# )
# else:
# break
# New
text = MULTIPLE_CHAR_PATTERN.sub(r"\1\1", text)
return text
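    # For illustration (added comment): the pattern collapses runs of three or
    # more identical non-digit characters down to two, e.g. "greaaaaat" -> "greaat".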
def _remove_redundant_punct(self, text: str) -> str:
text_ = text
result = re.search(REDUNDANT_PUNCT_PATTERN, text)
dif = 0
while result:
sub = result.group()
sub = sorted(set(sub), key=sub.index)
sub = " " + "".join(list(sub)) + " "
text = "".join(
(text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])
)
text_ = "".join(
(text_[: result.span()[0]], text_[result.span()[1] :])
).strip()
dif = abs(len(text) - len(text_))
result = re.search(REDUNDANT_PUNCT_PATTERN, text_)
text = re.sub(r"\s+", " ", text)
return text.strip()
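# Minimal usage sketch (illustrative; not part of the original module). The model
# name mirrors the docstring example above; the sentence and URL are made-up
# placeholders.
if __name__ == "__main__":
    prep = ArabertPreprocessor("aubmindlab/bert-base-arabertv02")
    cleaned = prep.preprocess("هذا مثال توضيحي فقط http://example.com")
    print(cleaned)  # the URL is replaced by the [رابط] token
    print(prep.unpreprocess(cleaned))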
|
1ee5be576b1bcdd67f8df9ebf16b4c38711f4fdd
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/__init__.py
|
552f30e13d69893bb4aa6863f9cd229a3d02d9a8
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 15,113
|
py
|
__init__.py
|
# coding: utf-8
from __future__ import absolute_import
# import models into model package
from huaweicloudsdkdns.v2.model.alias_target import AliasTarget
from huaweicloudsdkdns.v2.model.associate_health_check_req import AssociateHealthCheckReq
from huaweicloudsdkdns.v2.model.associate_health_check_request import AssociateHealthCheckRequest
from huaweicloudsdkdns.v2.model.associate_health_check_response import AssociateHealthCheckResponse
from huaweicloudsdkdns.v2.model.associate_router_request import AssociateRouterRequest
from huaweicloudsdkdns.v2.model.associate_router_request_body import AssociateRouterRequestBody
from huaweicloudsdkdns.v2.model.associate_router_response import AssociateRouterResponse
from huaweicloudsdkdns.v2.model.batch_create_record_set_with_line import BatchCreateRecordSetWithLine
from huaweicloudsdkdns.v2.model.batch_create_tag_request import BatchCreateTagRequest
from huaweicloudsdkdns.v2.model.batch_create_tag_response import BatchCreateTagResponse
from huaweicloudsdkdns.v2.model.batch_delete_record_set_with_line_request import BatchDeleteRecordSetWithLineRequest
from huaweicloudsdkdns.v2.model.batch_delete_record_set_with_line_request_body import BatchDeleteRecordSetWithLineRequestBody
from huaweicloudsdkdns.v2.model.batch_delete_record_set_with_line_response import BatchDeleteRecordSetWithLineResponse
from huaweicloudsdkdns.v2.model.batch_hand_tags import BatchHandTags
from huaweicloudsdkdns.v2.model.batch_update_record_set import BatchUpdateRecordSet
from huaweicloudsdkdns.v2.model.batch_update_record_set_with_line_req import BatchUpdateRecordSetWithLineReq
from huaweicloudsdkdns.v2.model.batch_update_record_set_with_line_request import BatchUpdateRecordSetWithLineRequest
from huaweicloudsdkdns.v2.model.batch_update_record_set_with_line_response import BatchUpdateRecordSetWithLineResponse
from huaweicloudsdkdns.v2.model.create_custom_line_request import CreateCustomLineRequest
from huaweicloudsdkdns.v2.model.create_custom_line_response import CreateCustomLineResponse
from huaweicloudsdkdns.v2.model.create_custom_lines import CreateCustomLines
from huaweicloudsdkdns.v2.model.create_eip_record_set_request import CreateEipRecordSetRequest
from huaweicloudsdkdns.v2.model.create_eip_record_set_response import CreateEipRecordSetResponse
from huaweicloudsdkdns.v2.model.create_line_group_request import CreateLineGroupRequest
from huaweicloudsdkdns.v2.model.create_line_group_response import CreateLineGroupResponse
from huaweicloudsdkdns.v2.model.create_line_groups_req import CreateLineGroupsReq
from huaweicloudsdkdns.v2.model.create_line_groups_resp import CreateLineGroupsResp
from huaweicloudsdkdns.v2.model.create_private_zone_req import CreatePrivateZoneReq
from huaweicloudsdkdns.v2.model.create_private_zone_request import CreatePrivateZoneRequest
from huaweicloudsdkdns.v2.model.create_private_zone_response import CreatePrivateZoneResponse
from huaweicloudsdkdns.v2.model.create_ptr_req import CreatePtrReq
from huaweicloudsdkdns.v2.model.create_public_zone_req import CreatePublicZoneReq
from huaweicloudsdkdns.v2.model.create_public_zone_request import CreatePublicZoneRequest
from huaweicloudsdkdns.v2.model.create_public_zone_response import CreatePublicZoneResponse
from huaweicloudsdkdns.v2.model.create_r_set_batch_lines_req import CreateRSetBatchLinesReq
from huaweicloudsdkdns.v2.model.create_record_set_request import CreateRecordSetRequest
from huaweicloudsdkdns.v2.model.create_record_set_request_body import CreateRecordSetRequestBody
from huaweicloudsdkdns.v2.model.create_record_set_response import CreateRecordSetResponse
from huaweicloudsdkdns.v2.model.create_record_set_with_batch_lines_request import CreateRecordSetWithBatchLinesRequest
from huaweicloudsdkdns.v2.model.create_record_set_with_batch_lines_response import CreateRecordSetWithBatchLinesResponse
from huaweicloudsdkdns.v2.model.create_record_set_with_line_request import CreateRecordSetWithLineRequest
from huaweicloudsdkdns.v2.model.create_record_set_with_line_request_body import CreateRecordSetWithLineRequestBody
from huaweicloudsdkdns.v2.model.create_record_set_with_line_response import CreateRecordSetWithLineResponse
from huaweicloudsdkdns.v2.model.create_tag_req import CreateTagReq
from huaweicloudsdkdns.v2.model.create_tag_request import CreateTagRequest
from huaweicloudsdkdns.v2.model.create_tag_response import CreateTagResponse
from huaweicloudsdkdns.v2.model.delete_custom_line_request import DeleteCustomLineRequest
from huaweicloudsdkdns.v2.model.delete_custom_line_response import DeleteCustomLineResponse
from huaweicloudsdkdns.v2.model.delete_line_group_request import DeleteLineGroupRequest
from huaweicloudsdkdns.v2.model.delete_line_group_response import DeleteLineGroupResponse
from huaweicloudsdkdns.v2.model.delete_private_zone_request import DeletePrivateZoneRequest
from huaweicloudsdkdns.v2.model.delete_private_zone_response import DeletePrivateZoneResponse
from huaweicloudsdkdns.v2.model.delete_public_zone_request import DeletePublicZoneRequest
from huaweicloudsdkdns.v2.model.delete_public_zone_response import DeletePublicZoneResponse
from huaweicloudsdkdns.v2.model.delete_record_set_request import DeleteRecordSetRequest
from huaweicloudsdkdns.v2.model.delete_record_set_response import DeleteRecordSetResponse
from huaweicloudsdkdns.v2.model.delete_record_sets_request import DeleteRecordSetsRequest
from huaweicloudsdkdns.v2.model.delete_record_sets_response import DeleteRecordSetsResponse
from huaweicloudsdkdns.v2.model.delete_tag_request import DeleteTagRequest
from huaweicloudsdkdns.v2.model.delete_tag_response import DeleteTagResponse
from huaweicloudsdkdns.v2.model.disassociate_health_check_request import DisassociateHealthCheckRequest
from huaweicloudsdkdns.v2.model.disassociate_health_check_response import DisassociateHealthCheckResponse
from huaweicloudsdkdns.v2.model.disassociate_router_request import DisassociateRouterRequest
from huaweicloudsdkdns.v2.model.disassociate_router_response import DisassociateRouterResponse
from huaweicloudsdkdns.v2.model.disassociaterouter_request_body import DisassociaterouterRequestBody
from huaweicloudsdkdns.v2.model.domain_quota_response_quotas import DomainQuotaResponseQuotas
from huaweicloudsdkdns.v2.model.line import Line
from huaweicloudsdkdns.v2.model.links_item import LinksItem
from huaweicloudsdkdns.v2.model.list_api_versions_item import ListApiVersionsItem
from huaweicloudsdkdns.v2.model.list_api_versions_request import ListApiVersionsRequest
from huaweicloudsdkdns.v2.model.list_api_versions_response import ListApiVersionsResponse
from huaweicloudsdkdns.v2.model.list_custom_line_request import ListCustomLineRequest
from huaweicloudsdkdns.v2.model.list_custom_line_response import ListCustomLineResponse
from huaweicloudsdkdns.v2.model.list_line_groups_request import ListLineGroupsRequest
from huaweicloudsdkdns.v2.model.list_line_groups_response import ListLineGroupsResponse
from huaweicloudsdkdns.v2.model.list_name_servers_request import ListNameServersRequest
from huaweicloudsdkdns.v2.model.list_name_servers_response import ListNameServersResponse
from huaweicloudsdkdns.v2.model.list_private_zones_request import ListPrivateZonesRequest
from huaweicloudsdkdns.v2.model.list_private_zones_response import ListPrivateZonesResponse
from huaweicloudsdkdns.v2.model.list_ptr_records_floating_resp import ListPtrRecordsFloatingResp
from huaweicloudsdkdns.v2.model.list_ptr_records_request import ListPtrRecordsRequest
from huaweicloudsdkdns.v2.model.list_ptr_records_response import ListPtrRecordsResponse
from huaweicloudsdkdns.v2.model.list_public_zones_request import ListPublicZonesRequest
from huaweicloudsdkdns.v2.model.list_public_zones_response import ListPublicZonesResponse
from huaweicloudsdkdns.v2.model.list_record_sets import ListRecordSets
from huaweicloudsdkdns.v2.model.list_record_sets_by_zone_request import ListRecordSetsByZoneRequest
from huaweicloudsdkdns.v2.model.list_record_sets_by_zone_response import ListRecordSetsByZoneResponse
from huaweicloudsdkdns.v2.model.list_record_sets_request import ListRecordSetsRequest
from huaweicloudsdkdns.v2.model.list_record_sets_response import ListRecordSetsResponse
from huaweicloudsdkdns.v2.model.list_record_sets_with_line_request import ListRecordSetsWithLineRequest
from huaweicloudsdkdns.v2.model.list_record_sets_with_line_response import ListRecordSetsWithLineResponse
from huaweicloudsdkdns.v2.model.list_record_sets_with_tags import ListRecordSetsWithTags
from huaweicloudsdkdns.v2.model.list_tag_req import ListTagReq
from huaweicloudsdkdns.v2.model.list_tag_request import ListTagRequest
from huaweicloudsdkdns.v2.model.list_tag_response import ListTagResponse
from huaweicloudsdkdns.v2.model.list_tags_request import ListTagsRequest
from huaweicloudsdkdns.v2.model.list_tags_response import ListTagsResponse
from huaweicloudsdkdns.v2.model.match import Match
from huaweicloudsdkdns.v2.model.metadata import Metadata
from huaweicloudsdkdns.v2.model.name_servers_resp import NameServersResp
from huaweicloudsdkdns.v2.model.nameserver import Nameserver
from huaweicloudsdkdns.v2.model.ns_records import NsRecords
from huaweicloudsdkdns.v2.model.page_link import PageLink
from huaweicloudsdkdns.v2.model.private_name_server import PrivateNameServer
from huaweicloudsdkdns.v2.model.private_zone_resp import PrivateZoneResp
from huaweicloudsdkdns.v2.model.public_zone_resp import PublicZoneResp
from huaweicloudsdkdns.v2.model.query_record_set_with_line_and_tags_resp import QueryRecordSetWithLineAndTagsResp
from huaweicloudsdkdns.v2.model.query_record_set_with_line_resp import QueryRecordSetWithLineResp
from huaweicloudsdkdns.v2.model.resource_item import ResourceItem
from huaweicloudsdkdns.v2.model.restore_ptr_record_request import RestorePtrRecordRequest
from huaweicloudsdkdns.v2.model.restore_ptr_record_response import RestorePtrRecordResponse
from huaweicloudsdkdns.v2.model.restore_ptr_req import RestorePtrReq
from huaweicloudsdkdns.v2.model.router import Router
from huaweicloudsdkdns.v2.model.router_with_status import RouterWithStatus
from huaweicloudsdkdns.v2.model.set_record_sets_status_req import SetRecordSetsStatusReq
from huaweicloudsdkdns.v2.model.set_record_sets_status_request import SetRecordSetsStatusRequest
from huaweicloudsdkdns.v2.model.set_record_sets_status_response import SetRecordSetsStatusResponse
from huaweicloudsdkdns.v2.model.show_api_info_request import ShowApiInfoRequest
from huaweicloudsdkdns.v2.model.show_api_info_response import ShowApiInfoResponse
from huaweicloudsdkdns.v2.model.show_domain_quota_request import ShowDomainQuotaRequest
from huaweicloudsdkdns.v2.model.show_domain_quota_response import ShowDomainQuotaResponse
from huaweicloudsdkdns.v2.model.show_line_group_request import ShowLineGroupRequest
from huaweicloudsdkdns.v2.model.show_line_group_response import ShowLineGroupResponse
from huaweicloudsdkdns.v2.model.show_private_zone_name_server_request import ShowPrivateZoneNameServerRequest
from huaweicloudsdkdns.v2.model.show_private_zone_name_server_response import ShowPrivateZoneNameServerResponse
from huaweicloudsdkdns.v2.model.show_private_zone_request import ShowPrivateZoneRequest
from huaweicloudsdkdns.v2.model.show_private_zone_response import ShowPrivateZoneResponse
from huaweicloudsdkdns.v2.model.show_ptr_record_set_request import ShowPtrRecordSetRequest
from huaweicloudsdkdns.v2.model.show_ptr_record_set_response import ShowPtrRecordSetResponse
from huaweicloudsdkdns.v2.model.show_public_zone_name_server_request import ShowPublicZoneNameServerRequest
from huaweicloudsdkdns.v2.model.show_public_zone_name_server_response import ShowPublicZoneNameServerResponse
from huaweicloudsdkdns.v2.model.show_public_zone_request import ShowPublicZoneRequest
from huaweicloudsdkdns.v2.model.show_public_zone_response import ShowPublicZoneResponse
from huaweicloudsdkdns.v2.model.show_record_set_by_zone_request import ShowRecordSetByZoneRequest
from huaweicloudsdkdns.v2.model.show_record_set_by_zone_resp import ShowRecordSetByZoneResp
from huaweicloudsdkdns.v2.model.show_record_set_by_zone_response import ShowRecordSetByZoneResponse
from huaweicloudsdkdns.v2.model.show_record_set_request import ShowRecordSetRequest
from huaweicloudsdkdns.v2.model.show_record_set_response import ShowRecordSetResponse
from huaweicloudsdkdns.v2.model.show_record_set_with_line_request import ShowRecordSetWithLineRequest
from huaweicloudsdkdns.v2.model.show_record_set_with_line_response import ShowRecordSetWithLineResponse
from huaweicloudsdkdns.v2.model.show_resource_tag_request import ShowResourceTagRequest
from huaweicloudsdkdns.v2.model.show_resource_tag_response import ShowResourceTagResponse
from huaweicloudsdkdns.v2.model.tag import Tag
from huaweicloudsdkdns.v2.model.tag_values import TagValues
from huaweicloudsdkdns.v2.model.update_custom_line_request import UpdateCustomLineRequest
from huaweicloudsdkdns.v2.model.update_custom_line_response import UpdateCustomLineResponse
from huaweicloudsdkdns.v2.model.update_customs_line_req import UpdateCustomsLineReq
from huaweicloudsdkdns.v2.model.update_line_groups_body import UpdateLineGroupsBody
from huaweicloudsdkdns.v2.model.update_line_groups_request import UpdateLineGroupsRequest
from huaweicloudsdkdns.v2.model.update_line_groups_response import UpdateLineGroupsResponse
from huaweicloudsdkdns.v2.model.update_private_zone_info_req import UpdatePrivateZoneInfoReq
from huaweicloudsdkdns.v2.model.update_private_zone_request import UpdatePrivateZoneRequest
from huaweicloudsdkdns.v2.model.update_private_zone_response import UpdatePrivateZoneResponse
from huaweicloudsdkdns.v2.model.update_ptr_record_request import UpdatePtrRecordRequest
from huaweicloudsdkdns.v2.model.update_ptr_record_response import UpdatePtrRecordResponse
from huaweicloudsdkdns.v2.model.update_ptr_req import UpdatePtrReq
from huaweicloudsdkdns.v2.model.update_public_zone_info import UpdatePublicZoneInfo
from huaweicloudsdkdns.v2.model.update_public_zone_request import UpdatePublicZoneRequest
from huaweicloudsdkdns.v2.model.update_public_zone_response import UpdatePublicZoneResponse
from huaweicloudsdkdns.v2.model.update_public_zone_status_request import UpdatePublicZoneStatusRequest
from huaweicloudsdkdns.v2.model.update_public_zone_status_request_body import UpdatePublicZoneStatusRequestBody
from huaweicloudsdkdns.v2.model.update_public_zone_status_response import UpdatePublicZoneStatusResponse
from huaweicloudsdkdns.v2.model.update_record_set_req import UpdateRecordSetReq
from huaweicloudsdkdns.v2.model.update_record_set_request import UpdateRecordSetRequest
from huaweicloudsdkdns.v2.model.update_record_set_response import UpdateRecordSetResponse
from huaweicloudsdkdns.v2.model.update_record_sets_req import UpdateRecordSetsReq
from huaweicloudsdkdns.v2.model.update_record_sets_request import UpdateRecordSetsRequest
from huaweicloudsdkdns.v2.model.update_record_sets_response import UpdateRecordSetsResponse
from huaweicloudsdkdns.v2.model.values_item import ValuesItem
from huaweicloudsdkdns.v2.model.version_item import VersionItem
|
fc045d0e57bb7f2dae55ae07d0497716bb7ca171
|
27b86f422246a78704e0e84983b2630533a47db6
|
/examples/encode_decode_dxf_as_base64.py
|
e1f4e8425d8ef5ca5c9ec0f72e32dc260866629d
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
encode_decode_dxf_as_base64.py
|
# Copyright (c) 2020, Joseph Flack
# License: MIT License
import ezdxf
from ezdxf.document import Drawing
# ------------------------------------------------------------------------------
# encode and decode DXF documents as base64 data for upload and download in web
# applications
# ------------------------------------------------------------------------------
def get_dxf_doc_from_upload_data(data: bytes) -> Drawing:
"""
This function turns the DXF data provided by the Dash Plotly upload component
into an `ezdxf` DXF document. The Dash Plotly upload component only provides
base64 encoded data.
Args:
data: DXF document uploaded as base64 encoded data
Returns:
DXF document as Drawing() object
"""
# Remove the mime-type and encoding info from data
# example: data:application/octet-stream;base64,OTk5DQpkeGZydyAwLjYuMw0KICAwDQpTRUNUSU9ODQogIDINCkhFQURFUg0KICA...
_, data = data.split(b",")
return ezdxf.decode_base64(data)
def encode_base64(doc: Drawing) -> bytes:
return b"data:application/octet-stream;base64," + doc.encode_base64()
if __name__ == "__main__":
data = encode_base64(ezdxf.new())
doc = get_dxf_doc_from_upload_data(data)
print(doc.acad_release)
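# Editor's note (illustrative, not part of the original example): the decode helper
# above expects ``bytes`` because it splits on b",". If the upload layer hands over a
# ``str`` (hypothetical variable ``contents`` below), encode it first:
#
#     doc = get_dxf_doc_from_upload_data(contents.encode("utf-8"))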
|
e8b7d7ae30651207bf8df7c2212942a4767101ce
|
85ccd32aa73eecf274a937f1fc3b6f4d484b77da
|
/test cases/unit/48 testsetup default/envcheck.py
|
34ad76d4e5ce8da455cafc9102fdf45e24d1606c
|
[
"Apache-2.0"
] |
permissive
|
mesonbuild/meson
|
48321cf4235dfcc0194fed90ff43a57367592bf7
|
cf5adf0c646474f0259d123fad60ca5ed38ec891
|
refs/heads/master
| 2023-09-01T05:58:50.807952
| 2023-03-17T20:27:37
| 2023-08-31T11:52:41
| 19,784,232
| 5,122
| 1,848
|
Apache-2.0
| 2023-09-14T15:47:23
| 2014-05-14T15:08:16
|
Python
|
UTF-8
|
Python
| false
| false
| 240
|
py
|
envcheck.py
|
#!/usr/bin/env python3
import os
assert 'ENV_A' in os.environ
assert 'ENV_B' in os.environ
assert 'ENV_C' in os.environ
print('ENV_A is', os.environ['ENV_A'])
print('ENV_B is', os.environ['ENV_B'])
print('ENV_C is', os.environ['ENV_C'])
|
fcc43fd899805dc5fbfcd3a86a7a7aacafd2d1c8
|
b4cbdad3d0efbe9899fbaebd31765e9f6b704cb3
|
/sdxl_train_control_net_lllite.py
|
09cf164381a58b05a3e33518691a01bcbe7f4d08
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
bmaltais/kohya_ss
|
aff3100961a38a25ee7a037a708232ce9e6998d9
|
2853f4cec850557e83882a4072cfe416cb9c57d7
|
refs/heads/master
| 2023-09-03T16:59:12.189406
| 2023-08-23T11:15:37
| 2023-08-23T11:15:37
| 559,584,476
| 5,495
| 711
|
Apache-2.0
| 2023-09-08T12:24:04
| 2022-10-30T15:15:32
|
Python
|
UTF-8
|
Python
| false
| false
| 26,288
|
py
|
sdxl_train_control_net_lllite.py
|
import argparse
import gc
import json
import math
import os
import random
import time
from multiprocessing import Value
from types import SimpleNamespace
import toml
from tqdm import tqdm
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from accelerate.utils import set_seed
from diffusers import DDPMScheduler, ControlNetModel
from safetensors.torch import load_file
from library import sai_model_spec, sdxl_model_util, sdxl_original_unet, sdxl_train_util
import library.model_util as model_util
import library.train_util as train_util
import library.config_util as config_util
from library.config_util import (
ConfigSanitizer,
BlueprintGenerator,
)
import library.huggingface_util as huggingface_util
import library.custom_train_functions as custom_train_functions
from library.custom_train_functions import (
add_v_prediction_like_loss,
apply_snr_weight,
prepare_scheduler_for_custom_training,
pyramid_noise_like,
apply_noise_offset,
scale_v_prediction_loss_like_noise_prediction,
)
import networks.control_net_lllite as control_net_lllite
# TODO: share this code with the other training scripts
def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler):
logs = {
"loss/current": current_loss,
"loss/average": avr_loss,
"lr": lr_scheduler.get_last_lr()[0],
}
if args.optimizer_type.lower().startswith("DAdapt".lower()):
logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]["d"] * lr_scheduler.optimizers[-1].param_groups[0]["lr"]
return logs
def train(args):
train_util.verify_training_args(args)
train_util.prepare_dataset_args(args, True)
sdxl_train_util.verify_sdxl_training_args(args)
cache_latents = args.cache_latents
use_user_config = args.dataset_config is not None
if args.seed is None:
args.seed = random.randint(0, 2**32)
set_seed(args.seed)
tokenizer1, tokenizer2 = sdxl_train_util.load_tokenizers(args)
# prepare the dataset
blueprint_generator = BlueprintGenerator(ConfigSanitizer(False, False, True, True))
if use_user_config:
print(f"Load dataset config from {args.dataset_config}")
user_config = config_util.load_user_config(args.dataset_config)
ignored = ["train_data_dir", "conditioning_data_dir"]
if any(getattr(args, attr) is not None for attr in ignored):
print(
"ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
", ".join(ignored)
)
)
else:
user_config = {
"datasets": [
{
"subsets": config_util.generate_controlnet_subsets_config_by_subdirs(
args.train_data_dir,
args.conditioning_data_dir,
args.caption_extension,
)
}
]
}
blueprint = blueprint_generator.generate(user_config, args, tokenizer=[tokenizer1, tokenizer2])
train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
current_epoch = Value("i", 0)
current_step = Value("i", 0)
ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
train_dataset_group.verify_bucket_reso_steps(32)
if args.debug_dataset:
train_util.debug_dataset(train_dataset_group)
return
if len(train_dataset_group) == 0:
print(
"No data found. Please verify arguments (train_data_dir must be the parent of folders with images) / 画像がありません。引数指定を確認してください(train_data_dirには画像があるフォルダではなく、画像があるフォルダの親フォルダを指定する必要があります)"
)
return
if cache_latents:
assert (
train_dataset_group.is_latent_cacheable()
), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
else:
print("WARNING: random_crop is not supported yet for ControlNet training / ControlNetの学習ではrandom_cropはまだサポートされていません")
if args.cache_text_encoder_outputs:
assert (
train_dataset_group.is_text_encoder_output_cacheable()
), "when caching Text Encoder output, either caption_dropout_rate, shuffle_caption, token_warmup_step or caption_tag_dropout_rate cannot be used / Text Encoderの出力をキャッシュするときはcaption_dropout_rate, shuffle_caption, token_warmup_step, caption_tag_dropout_rateは使えません"
# prepare the accelerator
print("prepare accelerator")
accelerator = train_util.prepare_accelerator(args)
is_main_process = accelerator.is_main_process
# prepare dtypes that match the mixed precision setting and cast as appropriate
weight_dtype, save_dtype = train_util.prepare_dtype(args)
vae_dtype = torch.float32 if args.no_half_vae else weight_dtype
# load the models
(
load_stable_diffusion_format,
text_encoder1,
text_encoder2,
vae,
unet,
logit_scale,
ckpt_info,
) = sdxl_train_util.load_target_model(args, accelerator, sdxl_model_util.MODEL_VERSION_SDXL_BASE_V1_0, weight_dtype)
# incorporate xformers or memory efficient attention into the model
train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers, args.sdpa)
# prepare for training
if cache_latents:
vae.to(accelerator.device, dtype=vae_dtype)
vae.requires_grad_(False)
vae.eval()
with torch.no_grad():
train_dataset_group.cache_latents(
vae,
args.vae_batch_size,
args.cache_latents_to_disk,
accelerator.is_main_process,
)
vae.to("cpu")
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
accelerator.wait_for_everyone()
# cache the Text Encoder outputs
if args.cache_text_encoder_outputs:
# Text Encodes are eval and no grad
with torch.no_grad():
train_dataset_group.cache_text_encoder_outputs(
(tokenizer1, tokenizer2),
(text_encoder1, text_encoder2),
accelerator.device,
None,
args.cache_text_encoder_outputs_to_disk,
accelerator.is_main_process,
)
accelerator.wait_for_everyone()
# prepare ControlNet
network = control_net_lllite.ControlNetLLLite(unet, args.cond_emb_dim, args.network_dim, args.network_dropout)
network.apply_to()
if args.network_weights is not None:
info = network.load_weights(args.network_weights)
accelerator.print(f"load ControlNet weights from {args.network_weights}: {info}")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
network.enable_gradient_checkpointing() # may have no effect
# prepare the classes needed for training
accelerator.print("prepare optimizer, data loader etc.")
trainable_params = list(network.prepare_optimizer_params())
print(f"trainable params count: {len(trainable_params)}")
print(f"number of trainable parameters: {sum(p.numel() for p in trainable_params if p.requires_grad)}")
_, _, optimizer = train_util.get_optimizer(args, trainable_params)
# prepare the dataloader
# number of DataLoader processes: 0 means only the main process is used
n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1) # cpu_count - 1, but at most the specified number
train_dataloader = torch.utils.data.DataLoader(
train_dataset_group,
batch_size=1,
shuffle=True,
collate_fn=collater,
num_workers=n_workers,
persistent_workers=args.persistent_data_loader_workers,
)
# compute the number of training steps
if args.max_train_epochs is not None:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
accelerator.print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# also pass the number of training steps to the dataset side
train_dataset_group.set_max_train_steps(args.max_train_steps)
# prepare the lr scheduler
lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
# experimental feature: perform fp16/bf16 training including gradients; cast the whole model to fp16/bf16
if args.full_fp16:
assert (
args.mixed_precision == "fp16"
), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
accelerator.print("enable full fp16 training.")
unet.to(weight_dtype)
network.to(weight_dtype)
elif args.full_bf16:
assert (
args.mixed_precision == "bf16"
), "full_bf16 requires mixed precision='bf16' / full_bf16を使う場合はmixed_precision='bf16'を指定してください。"
accelerator.print("enable full bf16 training.")
unet.to(weight_dtype)
network.to(weight_dtype)
# accelerator is supposed to handle the rest appropriately
unet, network, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, network, optimizer, train_dataloader, lr_scheduler
)
network: control_net_lllite.ControlNetLLLite
# transform DDP after prepare (train_network here only)
unet, network = train_util.transform_models_if_DDP([unet, network])
if args.gradient_checkpointing:
unet.train() # according to TI example in Diffusers, train is required -> since this uses the original U-Net it could actually be removed
else:
unet.eval()
network.prepare_grad_etc()
# if the Text Encoder outputs are cached, move the encoders to CPU
if args.cache_text_encoder_outputs:
# move Text Encoders for sampling images. Text Encoder doesn't work on CPU with fp16
text_encoder1.to("cpu", dtype=torch.float32)
text_encoder2.to("cpu", dtype=torch.float32)
if torch.cuda.is_available():
torch.cuda.empty_cache()
else:
# make sure Text Encoders are on GPU
text_encoder1.to(accelerator.device)
text_encoder2.to(accelerator.device)
if not cache_latents:
vae.requires_grad_(False)
vae.eval()
vae.to(accelerator.device, dtype=vae_dtype)
# experimental feature: perform fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
# resume training if a saved state is specified
train_util.resume_from_local_or_hf_if_specified(accelerator, args)
# compute the number of epochs
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
# run training
# TODO: find a way to handle total batch size when there are multiple datasets
accelerator.print("running training / 学習開始")
accelerator.print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
accelerator.print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
accelerator.print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
accelerator.print(f" num epochs / epoch数: {num_train_epochs}")
accelerator.print(f" batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}")
# print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
accelerator.print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
accelerator.print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
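# note (editor's comment): beta_start=0.00085, beta_end=0.012 with a scaled_linear
# schedule over 1000 timesteps matches the noise schedule Stable Diffusion models are trained with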
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if args.zero_terminal_snr:
custom_train_functions.fix_noise_scheduler_betas_for_zero_terminal_snr(noise_scheduler)
if accelerator.is_main_process:
init_kwargs = {}
if args.log_tracker_config is not None:
init_kwargs = toml.load(args.log_tracker_config)
accelerator.init_trackers(
"lllite_control_net_train" if args.log_tracker_name is None else args.log_tracker_name, init_kwargs=init_kwargs
)
loss_list = []
loss_total = 0.0
del train_dataset_group
# function for saving/removing
def save_model(ckpt_name, unwrapped_nw, steps, epoch_no, force_sync_upload=False):
os.makedirs(args.output_dir, exist_ok=True)
ckpt_file = os.path.join(args.output_dir, ckpt_name)
accelerator.print(f"\nsaving checkpoint: {ckpt_file}")
sai_metadata = train_util.get_sai_model_spec(None, args, True, True, False)
sai_metadata["modelspec.architecture"] = sai_model_spec.ARCH_SD_XL_V1_BASE + "/control-net-lllite"
unwrapped_nw.save_weights(ckpt_file, save_dtype, sai_metadata)
if args.huggingface_repo_id is not None:
huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
def remove_model(old_ckpt_name):
old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
if os.path.exists(old_ckpt_file):
accelerator.print(f"removing old checkpoint: {old_ckpt_file}")
os.remove(old_ckpt_file)
# training loop
for epoch in range(num_train_epochs):
accelerator.print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
network.on_epoch_start() # train()
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
with accelerator.accumulate(network):
with torch.no_grad():
if "latents" in batch and batch["latents"] is not None:
latents = batch["latents"].to(accelerator.device)
else:
# convert images to latents
latents = vae.encode(batch["images"].to(dtype=vae_dtype)).latent_dist.sample()
# if NaNs are present, print a warning and replace them with zeros
if torch.any(torch.isnan(latents)):
accelerator.print("NaN found in latents, replacing with zeros")
latents = torch.where(torch.isnan(latents), torch.zeros_like(latents), latents)
latents = latents * sdxl_model_util.VAE_SCALE_FACTOR
if "text_encoder_outputs1_list" not in batch or batch["text_encoder_outputs1_list"] is None:
input_ids1 = batch["input_ids"]
input_ids2 = batch["input_ids2"]
with torch.no_grad():
# Get the text embedding for conditioning
input_ids1 = input_ids1.to(accelerator.device)
input_ids2 = input_ids2.to(accelerator.device)
encoder_hidden_states1, encoder_hidden_states2, pool2 = train_util.get_hidden_states_sdxl(
args.max_token_length,
input_ids1,
input_ids2,
tokenizer1,
tokenizer2,
text_encoder1,
text_encoder2,
None if not args.full_fp16 else weight_dtype,
)
else:
encoder_hidden_states1 = batch["text_encoder_outputs1_list"].to(accelerator.device).to(weight_dtype)
encoder_hidden_states2 = batch["text_encoder_outputs2_list"].to(accelerator.device).to(weight_dtype)
pool2 = batch["text_encoder_pool2_list"].to(accelerator.device).to(weight_dtype)
# get size embeddings
orig_size = batch["original_sizes_hw"]
crop_size = batch["crop_top_lefts"]
target_size = batch["target_sizes_hw"]
embs = sdxl_train_util.get_size_embeddings(orig_size, crop_size, target_size, accelerator.device).to(weight_dtype)
# concat embeddings
vector_embedding = torch.cat([pool2, embs], dim=1).to(weight_dtype)
text_embedding = torch.cat([encoder_hidden_states1, encoder_hidden_states2], dim=2).to(weight_dtype)
# Sample noise, sample a random timestep for each image, and add noise to the latents,
# with noise offset and/or multires noise if specified
noise, noisy_latents, timesteps = train_util.get_noise_noisy_latents_and_timesteps(args, noise_scheduler, latents)
noisy_latents = noisy_latents.to(weight_dtype) # TODO check why noisy_latents is not weight_dtype
controlnet_image = batch["conditioning_images"].to(dtype=weight_dtype)
with accelerator.autocast():
# conditioning imageをControlNetに渡す / pass conditioning image to ControlNet
# 内部でcond_embに変換される / it will be converted to cond_emb inside
network.set_cond_image(controlnet_image)
# それらの値を使いつつ、U-Netでノイズを予測する / predict noise with U-Net using those values
noise_pred = unet(noisy_latents, timesteps, text_embedding, vector_embedding)
if args.v_parameterization:
# v-parameterization training
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
target = noise
loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="none")
loss = loss.mean([1, 2, 3])
loss_weights = batch["loss_weights"] # per-sample loss weights
loss = loss * loss_weights
if args.min_snr_gamma:
loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma)
if args.scale_v_pred_loss_like_noise_pred:
loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler)
if args.v_pred_like_loss:
loss = add_v_prediction_like_loss(loss, timesteps, noise_scheduler, args.v_pred_like_loss)
loss = loss.mean() # already a mean, so no need to divide by batch_size
accelerator.backward(loss)
if accelerator.sync_gradients and args.max_grad_norm != 0.0:
params_to_clip = network.get_trainable_params()
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
# sdxl_train_util.sample_images(accelerator, args, None, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
# save the model every specified number of steps
if args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0:
accelerator.wait_for_everyone()
if accelerator.is_main_process:
ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, global_step)
save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch)
if args.save_state:
train_util.save_and_remove_state_stepwise(args, accelerator, global_step)
remove_step_no = train_util.get_remove_step_no(args, global_step)
if remove_step_no is not None:
remove_ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, remove_step_no)
remove_model(remove_ckpt_name)
current_loss = loss.detach().item()
if epoch == 0:
loss_list.append(current_loss)
else:
loss_total -= loss_list[step]
loss_list[step] = current_loss
loss_total += current_loss
avr_loss = loss_total / len(loss_list)
logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
if args.logging_dir is not None:
logs = generate_step_logs(args, current_loss, avr_loss, lr_scheduler)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
if args.logging_dir is not None:
logs = {"loss/epoch": loss_total / len(loss_list)}
accelerator.log(logs, step=epoch + 1)
accelerator.wait_for_everyone()
# save the model every specified number of epochs
if args.save_every_n_epochs is not None:
saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs
if is_main_process and saving:
ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, epoch + 1)
save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch + 1)
remove_epoch_no = train_util.get_remove_epoch_no(args, epoch + 1)
if remove_epoch_no is not None:
remove_ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, remove_epoch_no)
remove_model(remove_ckpt_name)
if args.save_state:
train_util.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1)
# self.sample_images(accelerator, args, epoch + 1, global_step, accelerator.device, vae, tokenizer, text_encoder, unet)
# end of epoch
if is_main_process:
network = accelerator.unwrap_model(network)
accelerator.end_training()
if is_main_process and args.save_state:
train_util.save_state_on_train_end(args, accelerator)
if is_main_process:
ckpt_name = train_util.get_last_ckpt_name(args, "." + args.save_model_as)
save_model(ckpt_name, network, global_step, num_train_epochs, force_sync_upload=True)
print("model saved.")
def setup_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
train_util.add_sd_models_arguments(parser)
train_util.add_dataset_arguments(parser, False, True, True)
train_util.add_training_arguments(parser, False)
train_util.add_optimizer_arguments(parser)
config_util.add_config_arguments(parser)
custom_train_functions.add_custom_train_arguments(parser)
sdxl_train_util.add_sdxl_training_arguments(parser)
parser.add_argument(
"--save_model_as",
type=str,
default="safetensors",
choices=[None, "ckpt", "pt", "safetensors"],
help="format to save the model (default is .safetensors) / モデル保存時の形式(デフォルトはsafetensors)",
)
parser.add_argument("--cond_emb_dim", type=int, default=None, help="conditioning embedding dimension / 条件付け埋め込みの次元数")
parser.add_argument("--network_weights", type=str, default=None, help="pretrained weights for network / 学習するネットワークの初期重み")
parser.add_argument("--network_dim", type=int, default=None, help="network dimensions (rank) / モジュールの次元数")
parser.add_argument(
"--network_dropout",
type=float,
default=None,
help="Drops neurons out of training every step (0 or None is default behavior (no dropout), 1 would drop all neurons) / 訓練時に毎ステップでニューロンをdropする(0またはNoneはdropoutなし、1は全ニューロンをdropout)",
)
parser.add_argument(
"--conditioning_data_dir",
type=str,
default=None,
help="conditioning data directory / 条件付けデータのディレクトリ",
)
parser.add_argument(
"--no_half_vae",
action="store_true",
help="do not use fp16/bf16 VAE in mixed precision (use float VAE) / mixed precisionでも fp16/bf16 VAEを使わずfloat VAEを使う",
)
return parser
if __name__ == "__main__":
# sdxl_original_unet.USE_REENTRANT = False
parser = setup_parser()
args = parser.parse_args()
args = train_util.read_config_from_file(args, parser)
train(args)
|
da6175e3331848d9261d0bb9246509c007e6250a
|
26bbcfdb811f7df13f7b5a95ba551da7adac4e9b
|
/src/certfuzz/fuzzers/fuzzer_base.py
|
8ff16d17ddd68edec8d2e1b26e06d8307d050580
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
CERTCC/certfuzz
|
080c3a5448a39d02049253fad96498ba50191586
|
892dae8676535b0ae5b77eea95ffbc21e9e1c959
|
refs/heads/develop
| 2022-11-11T06:12:09.032184
| 2020-06-10T19:57:26
| 2020-06-10T19:57:26
| 20,684,363
| 161
| 25
|
NOASSERTION
| 2023-05-10T14:27:00
| 2014-06-10T12:29:53
|
Python
|
UTF-8
|
Python
| false
| false
| 7,578
|
py
|
fuzzer_base.py
|
'''
Created on Feb 3, 2012
@organization: cert.org
'''
import StringIO
import collections
import logging
import os
import zipfile
from certfuzz.fuzztools.filetools import find_or_create_dir, write_file
from certfuzz.helpers.misc import log_object
MAXDEPTH = 3
SLEEPTIMER = 0.5
BACKOFF_FACTOR = 2
logger = logging.getLogger(__name__)
def logerror(func, path, excinfo):
logger.warning('%s failed to remove %s: %s', func, path, excinfo)
def is_fuzzable(x, exclude_list):
'''
Returns True if x is not in any (low, high) range in exclude_list
:param x:
:param exclude_list:
'''
if exclude_list is None:
exclude_list = []
for (low, high) in exclude_list:
if low <= x <= high:
return False
return True
class Fuzzer(object):
'''
The Fuzzer class is intended to be used as the parent class for actual
fuzzer implementations. It should be implemented in a runtime context using
the 'with' construct:
with Fuzzer(*args) as fuzzer:
fuzzer.fuzz()
'''
# Not all fuzzers are minimizable. Default to false, and those
# child classes that are can set it themselves
is_minimizable = False
def __init__(self, seedfile_obj, outdir_base, iteration, options):
'''
Parameters get converted to attributes.
@param local_seed_path:
@param fuzz_output_path:
@param iteration:
@param options:
'''
logger.debug('Initialize Fuzzer')
self.sf = seedfile_obj
# TODO: rename tmpdir -> working_dir
self.tmpdir = outdir_base
self.rng_seed = int(self.sf.md5, 16)
self.iteration = iteration
self.options = options
# set up some file name related attributes
self.basename_fuzzed = '%s-%d%s' % (self.sf.root, self.iteration, self.sf.ext)
self.output_file_path = os.path.join(self.tmpdir, self.basename_fuzzed)
self.input = None
self.output = None
self.fuzzed_changes_input = True
# Not all fuzzers use rangefinder. Default to None and
# set it in child classes for those that do
self.range = None
self.saved_arcinfo = collections.OrderedDict()
self._parse_options()
log_object(self, logger)
def __enter__(self):
find_or_create_dir(self.tmpdir)
self.input = bytearray(self.sf.read())
self._validate()
return self
def __exit__(self, etype, value, traceback):
pass
def write_fuzzed(self, outdir=None):
if outdir:
outfile = os.path.join(outdir, self.basename_fuzzed)
else:
outfile = self.output_file_path
if self.output:
write_file(self.output, outfile)
self.output_file_path = outfile
return os.path.exists(outfile)
def fuzz(self):
if not self.output:
self._prefuzz()
self._fuzz()
self._postfuzz()
# if self.fuzzed_changes_input:
# self._verify()
return self.write_fuzzed()
# def _verify(self):
# '''
# Override or augment with your own verification.
# Typically it's enough to confirm that the output differs from the input
# '''
#
# # throw an exception if for some reason we didn't fuzz the input
# # some fuzzers don't materially alter the file every time, e.g., swap
# if self.input == self.output:
# raise FuzzerInputMatchesOutputError('Fuzz failed: input matches output')
def _prefuzz(self):
'''
Override this method if you want to do some processing before you call
_fuzz
'''
pass
def _postfuzz(self):
'''
Override this method if you want to do some post-processing after
calling _fuzz
'''
pass
def _fuzz(self):
'''
Override this method to implement your fuzzer. The seed file contents
are in self.input. Put the output into self.output.
'''
# disable fuzzed_changes_input since we're copying in -> out
self.fuzzed_changes_input = False
self.output = self.input
def _validate(self):
'''
Placeholder for subclass methods.
Raise exceptions if the fuzzer doesn't have what it needs to run.
'''
pass
def _parse_options(self):
'''
Placeholder for subclass methods
'''
pass
class MinimizableFuzzer(Fuzzer):
'''
Convenience class to be used as parent of all minimizable fuzzers (i.e.,
those that change more than one byte but do not alter the length of the file)
'''
is_minimizable = True
def _prefuzz(self):
if self.options.get('fuzz_zip_container') or not self.sf.is_zip:
return
# If the seed is zip-based, fuzz the contents rather than the container
inmemseed = StringIO.StringIO(self.input)
try:
tempzip = zipfile.ZipFile(inmemseed, 'r')
except:
logger.warning('Bad zip file. Falling back to mutating container.')
self.sf.is_zip = False
inmemseed.close()
return
'''
get info on all the archived files and concatenate their contents
into self.input
'''
self.zipinput = bytearray()
logger.debug('Reading files from zip...')
for i in tempzip.namelist():
try:
data = tempzip.read(i)
except:
# BadZipfile or encrypted
logger.warning('Bad zip file. Falling back to mutating container.')
self.sf.is_zip = False
tempzip.close()
inmemseed.close()
return
# save split indices and compression type for archival
# reconstruction
# save compress type
self.saved_arcinfo[i] = (len(self.zipinput), len(data),
tempzip.getinfo(i).compress_type)
self.zipinput += data
tempzip.close()
inmemseed.close()
# Zip processing went fine, so use the zip contents as self.input to fuzzer
self.input = self.zipinput
def _postfuzz(self):
if self.options.get('fuzz_zip_container') or not self.sf.is_zip:
return
'''rebuild the zip file and put it in self.output
Note: We assume that the fuzzer has not changed the lengths
of the archived files, otherwise we won't be able to properly
split self.output
'''
logger.debug('Creating in-memory zip with mutated contents.')
inmemzip = StringIO.StringIO()
tempzip = zipfile.ZipFile(inmemzip, 'w')
'''
reconstruct archived files, using the same compression scheme as the
source
'''
for name, info in self.saved_arcinfo.iteritems():
# write out output file
if info[2] == 0 or info[2] == 8:
# Python zipfile only supports compression types 0 and 8
compressiontype = info[2]
else:
logger.warning('Compression type %s is not supported. Overriding', info[2])
compressiontype = 8
tempzip.writestr(name, str(self.output[info[0]:info[0] + info[1]]),
compress_type=compressiontype)
tempzip.close()
# get the byte string version of the archive and put in self.output
self.output = inmemzip.getvalue()
inmemzip.close()
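# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of certfuzz): a minimal
# subclass showing the override contract documented in Fuzzer._fuzz -- read
# the seed bytes from self.input and place the mutated bytes in self.output.
# The class name and the mutation strategy are hypothetical.
class ExampleBitFlipFuzzer(MinimizableFuzzer):
    def _fuzz(self):
        # flip the low bit at one seed-derived position so output differs from input
        mutated = bytearray(self.input)
        if mutated:
            position = self.rng_seed % len(mutated)
            mutated[position] ^= 0x01
        self.output = mutated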
|
85c0bffba822d03a405327519f28121b57f2ce3e
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/lyricalquotes/app.py
|
40cd4d87d63b5bfaa960c885adbc423c3ba68ffe
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262
| 2023-07-30T18:36:19
| 2023-07-30T18:36:19
| 67,726,018
| 258
| 205
|
MIT
| 2023-09-07T02:36:36
| 2016-09-08T17:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 208
|
py
|
app.py
|
#encoding:utf-8
subreddit = 'quotes'
t_channel = '@lyricalquotes'
def send_post(submission, r2t):
return r2t.send_simple(submission,
text='{title}',
disable_web_page_preview=True
)
|
fa4ac2ea048fb5dc9c1d3727187676f5f65b2f5f
|
091155389673325cfe8b0da3dc64c113f1ded707
|
/cvpods/evaluation/registry.py
|
ef91cb2021d21c3d02db463fadc3bfb669f2b7d2
|
[
"Apache-2.0"
] |
permissive
|
Megvii-BaseDetection/cvpods
|
7b7c808257b757d7f94d520ea03b370105fb05eb
|
2deea5dc659371318c8a570c644201d913a83027
|
refs/heads/master
| 2023-03-22T00:26:06.248877
| 2023-03-10T10:05:26
| 2023-03-10T10:05:26
| 318,124,806
| 659
| 91
|
Apache-2.0
| 2023-03-10T10:05:28
| 2020-12-03T08:26:57
|
Python
|
UTF-8
|
Python
| false
| false
| 172
|
py
|
registry.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2021 Megvii Inc. All rights reserved.
from cvpods.utils import Registry
EVALUATOR = Registry("evaluator")
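# Illustrative usage sketch (editor's addition): the typical pattern with this kind
# of registry -- assuming cvpods' Registry follows the usual register()/get()
# interface -- is to register an evaluator class and look it up later by name:
#
#     @EVALUATOR.register()
#     class MyEvaluator: # hypothetical evaluator class
#         ...
#
#     evaluator_cls = EVALUATOR.get("MyEvaluator")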
|
898ca2580c26ffce6ccd0164c1e9a90478f79731
|
0a616a3ec554abb13e0cd283419a5355e3e23052
|
/nltk/misc/sort.py
|
4e5ef9f32ac9efed5f9179c02b42cb77cea6ea5f
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-NC-ND-3.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
nltk/nltk
|
79487b84bb96aab4bfc2acde3e12ce34a42f5c73
|
582e6e35f0e6c984b44ec49dcb8846d9c011d0a8
|
refs/heads/develop
| 2023-09-05T04:05:54.002721
| 2023-06-02T02:45:47
| 2023-06-02T02:45:47
| 299,862
| 11,860
| 3,171
|
Apache-2.0
| 2023-08-02T03:12:33
| 2009-09-07T10:53:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,371
|
py
|
sort.py
|
# Natural Language Toolkit: List Sorting
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
This module provides a variety of list sorting algorithms, to
illustrate the many different algorithms (recipes) for solving a
problem, and how to analyze algorithms experimentally.
"""
# These algorithms are taken from:
# Levitin (2004) The Design and Analysis of Algorithms
##################################################################
# Selection Sort
##################################################################
def selection(a):
"""
Selection Sort: scan the list to find its smallest element, then
swap it with the first element. The remainder of the list is one
element smaller; apply the same method to this list, and so on.
"""
count = 0
for i in range(len(a) - 1):
min = i
for j in range(i + 1, len(a)):
if a[j] < a[min]:
min = j
count += 1
a[min], a[i] = a[i], a[min]
return count
##################################################################
# Bubble Sort
##################################################################
def bubble(a):
"""
Bubble Sort: compare adjacent elements of the list left-to-right,
and swap them if they are out of order. After one pass through
the list swapping adjacent items, the largest item will be in
the rightmost position. The remainder is one element smaller;
apply the same method to this list, and so on.
"""
count = 0
for i in range(len(a) - 1):
for j in range(len(a) - i - 1):
if a[j + 1] < a[j]:
a[j], a[j + 1] = a[j + 1], a[j]
count += 1
return count
##################################################################
# Merge Sort
##################################################################
def _merge_lists(b, c):
count = 0
i = j = 0
a = []
while i < len(b) and j < len(c):
count += 1
if b[i] <= c[j]:
a.append(b[i])
i += 1
else:
a.append(c[j])
j += 1
if i == len(b):
a += c[j:]
else:
a += b[i:]
return a, count
def merge(a):
"""
Merge Sort: split the list in half, and sort each half, then
combine the sorted halves.
"""
count = 0
if len(a) > 1:
midpoint = len(a) // 2
b = a[:midpoint]
c = a[midpoint:]
count_b = merge(b)
count_c = merge(c)
result, count_a = _merge_lists(b, c)
a[:] = result # copy the result back into a.
count = count_a + count_b + count_c
return count
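# Worked example: merge sorts the list in place and returns the comparison count,
# e.g. a = [3, 1, 2]; merge(a) returns 3 and leaves a == [1, 2, 3].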
##################################################################
# Quick Sort
##################################################################
def _partition(a, l, r):
p = a[l]
i = l
j = r + 1
count = 0
while True:
while i < r:
i += 1
if a[i] >= p:
break
while j > l:
j -= 1
if j < l or a[j] <= p:
break
a[i], a[j] = a[j], a[i] # swap
count += 1
if i >= j:
break
a[i], a[j] = a[j], a[i] # undo last swap
a[l], a[j] = a[j], a[l]
return j, count
def _quick(a, l, r):
count = 0
if l < r:
s, count = _partition(a, l, r)
count += _quick(a, l, s - 1)
count += _quick(a, s + 1, r)
return count
def quick(a):
return _quick(a, 0, len(a) - 1)
##################################################################
# Demonstration
##################################################################
def demo():
from random import shuffle
for size in (10, 20, 50, 100, 200, 500, 1000):
a = list(range(size))
# various sort methods
shuffle(a)
count_selection = selection(a)
shuffle(a)
count_bubble = bubble(a)
shuffle(a)
count_merge = merge(a)
shuffle(a)
count_quick = quick(a)
print(
("size=%5d: selection=%8d, bubble=%8d, " "merge=%6d, quick=%6d")
% (size, count_selection, count_bubble, count_merge, count_quick)
)
if __name__ == "__main__":
demo()
|
41d9c4b0579c7f72155cb2451e1432cb691263d5
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/dataflow/stream/job/monitor_job_strategy.py
|
7c759eecb65cbe4f0a9d3ab75f566459f15c2494
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 20,935
|
py
|
monitor_job_strategy.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
import json
from collections import Counter
from conf.dataapi_settings import AIOPS_TASK_ADMINISTRATORS, SYSTEM_ADMINISTRATORS
from dataflow.shared.jobnavi.jobnavi_helper import JobNaviHelper
from dataflow.shared.log import stream_logger as logger
from dataflow.shared.send_message import send_message
from dataflow.shared.utils.concurrency import concurrent_call_func
from dataflow.stream.api.stream_jobnavi_helper import StreamJobNaviHelper
from dataflow.stream.cluster_config.util.ScheduleHandler import ScheduleHandler
from dataflow.stream.handlers.monitor_handler import MonitorHandler
from dataflow.stream.job.entity.monitor_job_entity import MonitorJobEntity
from dataflow.stream.job.monitor_schedule_handler import MonitorScheduleHandler
from dataflow.stream.metrics.exception_alert import ExceptionAlert
from dataflow.stream.settings import COMMON_MAX_COROUTINE_NUM, DeployMode
class MonitorJobStrategy(object):
_geog_area_code = None
_jobnavi_stream_helper = None
def __init__(self, geog_area_code):
self._geog_area_code = geog_area_code
jobnavi_cluster_id = JobNaviHelper.get_jobnavi_cluster("stream")
self._jobnavi_stream_helper = StreamJobNaviHelper(geog_area_code, jobnavi_cluster_id)
# get the state of the YARN cluster
def request_yarn_state(self):
try:
# step1: validate
self._jobnavi_stream_helper.valid_yarn_service_schedule()
# step2: get exec_id
exec_id = self._jobnavi_stream_helper.get_yarn_service_execute()
# step3: get event_id and parse result
# YARN cluster state
yarn_state = self._jobnavi_stream_helper.send_yarn_state_event(exec_id)
except Exception as e:
logger.exception(e)
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"YARN集群监控程序通过jobnavi获取线上yarn-state信息异常,请查看具体异常日志",
raise_exception=False,
)
raise e
return yarn_state
# get all apps on YARN
def request_yarn_jobs(self):
try:
# step1: validate
self._jobnavi_stream_helper.valid_yarn_service_schedule()
# step2: get exec_id
exec_id = self._jobnavi_stream_helper.get_yarn_service_execute()
# step3: get event_id and parse result
# applications of the RUNNING Flink jobs on the YARN cluster
yarn_apps_arr = self._jobnavi_stream_helper.send_yarn_apps_event(exec_id)
yarn_apps = json.loads(yarn_apps_arr)
except Exception as e:
logger.exception(e)
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"YARN集群监控程序通过jobnavi获取线上yarn-apps信息异常,请查看具体异常日志",
raise_exception=False,
)
raise e
return yarn_apps
# query the jobs submitted by the platform (all jobs), filtered by geog area code
def get_store_flink_running_job(self):
if self._geog_area_code == "inland":
return MonitorHandler.get_store_flink_running_job()
else:
geog_area_jobs = []
job_info_ids = []
jobs_info = MonitorHandler.get_store_flink_job_info(self._geog_area_code)
for job_info in jobs_info:
job_info_ids.append(job_info["job_id"])
running_jobs = MonitorHandler.get_store_flink_running_job()
for job in running_jobs:
if job["stream_id"] in job_info_ids:
geog_area_jobs.append(job)
return geog_area_jobs
# query the yarn-cluster jobs submitted by the platform, filtered by geog area code
def get_store_yarn_cluster(self):
if self._geog_area_code == "inland":
return MonitorHandler.get_store_yarn_cluster()
else:
geog_area_jobs = []
job_info_ids = []
jobs_info = MonitorHandler.get_store_flink_job_info(self._geog_area_code)
for job_info in jobs_info:
job_info_ids.append(job_info["job_id"])
running_jobs = MonitorHandler.get_store_yarn_cluster()
for job in running_jobs:
if job["stream_id"] in job_info_ids:
geog_area_jobs.append(job)
return geog_area_jobs
# get detailed information for every job on the YARN cluster
def request_yarn_job_details(self, yarn_apps):
job_detail_list = []
func_info = []
for yarn_app in yarn_apps:
yarn_app_name = yarn_app["name"]
if "debug" in yarn_app_name:
continue
func_info.append([ScheduleHandler(yarn_app_name, self._geog_area_code).list_status, {}])
for segment in [
func_info[i : i + COMMON_MAX_COROUTINE_NUM] for i in range(0, len(func_info), COMMON_MAX_COROUTINE_NUM)
]:
threads_res = concurrent_call_func(segment)
for segment_index, application_info in enumerate(threads_res):
if application_info:
for per_job_info in application_info["jobs"]:
name = per_job_info["name"]
state = per_job_info["state"]
duration = per_job_info["duration"]
tasks_total = per_job_info["tasks"]["total"]
tasks_running = per_job_info["tasks"]["running"]
schedule_id = application_info["schedule_id"]
if state in ["CANCELED", "FINISHED"]:
continue
job_detail_list.append(
MonitorJobEntity(
name=name,
state=state,
duration=duration,
tasks_total=tasks_total,
tasks_running=tasks_running,
schedule_id=schedule_id,
)
)
return job_detail_list
# whether the YARN cluster itself is healthy
def check_yarn_state_health(self, yarn_state):
if not yarn_state:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[%s]获取YARN集群状态异常" % self._geog_area_code,
raise_exception=False,
)
logger.info("monitor yarn state is : %s" % yarn_state)
# compare the yarn-session clusters recorded as running in the DB against what is alive on YARN; return the unhealthy yarn-sessions
def query_yarn_session_apps_health(self, yarn_apps):
db_running_yarn_sessions_name = []
db_running_yarn_sessions = MonitorHandler.get_store_yarn_session(self._geog_area_code)
for yarn_session in db_running_yarn_sessions:
db_running_yarn_sessions_name.append(yarn_session["cluster_name"])
yarn_running_apps_name = []
for yarn_app in yarn_apps:
yarn_running_apps_name.append(yarn_app["name"])
db_running_yarn_sessions_diff = list(set(db_running_yarn_sessions_name).difference(set(yarn_running_apps_name)))
return db_running_yarn_sessions_diff
# check whether the yarn-session clusters recorded as running in the DB are still alive on YARN, and send an alert if not
def check_yarn_session_apps_health(self, yarn_apps):
db_running_yarn_sessions_diff = self.query_yarn_session_apps_health(yarn_apps)
if db_running_yarn_sessions_diff:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[{}]yarn-session: {} 在集群上状态异常".format(self._geog_area_code, db_running_yarn_sessions_diff),
raise_exception=False,
)
# compare whether the yarn-cluster jobs are still alive on YARN
def query_yarn_cluster_apps_health(self, yarn_apps):
db_running_yarn_cluster_stream_ids = []
db_running_yarn_clusters = self.get_store_yarn_cluster()
for yarn_cluster in db_running_yarn_clusters:
db_running_yarn_cluster_stream_ids.append(yarn_cluster["stream_id"])
yarn_running_apps_name = []
for yarn_app in yarn_apps:
yarn_running_apps_name.append(yarn_app["name"])
db_running_yarn_clusters_diff = list(
set(db_running_yarn_cluster_stream_ids).difference(set(yarn_running_apps_name))
)
return db_running_yarn_clusters_diff
# check whether the yarn-cluster jobs recorded as running in the DB are still alive on YARN
def check_yarn_cluster_apps_health(self, yarn_apps):
db_running_yarn_clusters_diff = self.query_yarn_cluster_apps_health(yarn_apps)
if db_running_yarn_clusters_diff:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[{}]yarn-cluster: {} 在集群上状态异常".format(self._geog_area_code, db_running_yarn_clusters_diff),
raise_exception=False,
)
# detect jobs running on YARN but not recorded in the DB (i.e. stopped on the platform yet still running on YARN), or duplicate running jobs with the same job_id on YARN
def check_yarn_duplicate_jobs(self, db_running_jobs, yarn_job_details):
db_running_jobs_stream_ids = []
for db_job in db_running_jobs:
db_running_jobs_stream_ids.append(db_job["stream_id"])
yarn_running_jobs_name = []
for yarn_job in yarn_job_details:
yarn_running_jobs_name.append(yarn_job.name)
yarn_running_jobs_diff = list(set(yarn_running_jobs_name).difference(set(db_running_jobs_stream_ids)))
# job still running on YARN but already stopped in the DB
if yarn_running_jobs_diff:
extra_jobs_name_schedule = []
for yarn_job in yarn_job_details:
if yarn_job.name in yarn_running_jobs_diff:
extra_jobs_name_schedule.append("{} -> {}".format(yarn_job.schedule_id, yarn_job.name))
if extra_jobs_name_schedule:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[{}]job: {} 在集群上未停止运行".format(self._geog_area_code, extra_jobs_name_schedule),
raise_exception=False,
)
# duplicate jobs with the same name appear on YARN
yarn_running_jobs_name_counter = dict(Counter(yarn_running_jobs_name))
duplicate_jobs_name = []
duplicate_jobs_name_schedule = []
for job_name, num in list(yarn_running_jobs_name_counter.items()):
if num > 1:
duplicate_jobs_name.append(job_name)
for yarn_job in yarn_job_details:
if yarn_job.name in duplicate_jobs_name:
duplicate_jobs_name_schedule.append("{} -> {}".format(yarn_job.schedule_id, yarn_job.name))
if duplicate_jobs_name_schedule:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"job: %s 在集群上重复提交运行" % duplicate_jobs_name_schedule,
raise_exception=False,
)
# find jobs recorded as running in the DB that no longer exist on YARN (lost jobs)
def query_missing_jobs(self, db_running_jobs, yarn_job_details):
db_running_jobs_stream_ids = []
for db_job in db_running_jobs:
db_running_jobs_stream_ids.append(db_job["stream_id"])
yarn_running_jobs_name = []
for yarn_job in yarn_job_details:
yarn_running_jobs_name.append(yarn_job.name)
db_running_jobs_diff = list(set(db_running_jobs_stream_ids).difference(set(yarn_running_jobs_name)))
return db_running_jobs_diff
# check for jobs recorded as running in the DB that no longer exist on YARN (lost jobs)
def check_missing_jobs(self, db_running_jobs, yarn_job_details):
db_running_jobs_diff = self.query_missing_jobs(db_running_jobs, yarn_job_details)
if db_running_jobs_diff:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[{}]job: {} 在集群上已经丢失".format(self._geog_area_code, db_running_jobs_diff),
raise_exception=False,
)
# whether each job on the YARN cluster is healthy, i.e. state != RUNNING or total_task != running_task
def check_job_health(self, db_running_jobs, yarn_job_details):
ten_minutes_as_second = 10 * 60
possible_unhealth_yarn_job = []
unhealth_yarn_job = []
for yarn_job in yarn_job_details:
if yarn_job.state != "RUNNING" or yarn_job.tasks_total != yarn_job.tasks_running:
possible_unhealth_yarn_job.append(yarn_job.name)
# exclude jobs that were just resubmitted manually, to reduce false alarms
for db_job in db_running_jobs:
if (
db_job["stream_id"] in possible_unhealth_yarn_job
and (datetime.datetime.now() - db_job["updated_at"]).total_seconds() > ten_minutes_as_second
):
unhealth_yarn_job.append(db_job["stream_id"])
if unhealth_yarn_job:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[{}]job: {} 在集群上状态不健康".format(self._geog_area_code, unhealth_yarn_job),
raise_exception=False,
)
# monitor task restarts: DB update_time > 10min and duration < the monitoring period (10min); also send the content and time of the most recent exceptions
def check_job_restart(self, db_running_jobs, yarn_job_details):
ten_minutes_as_millisecond = 10 * 60 * 1000
yarn_restart_job = []
unnormal_restart_job = []
for yarn_job in yarn_job_details:
if yarn_job.state == "RUNNING" and yarn_job.duration < ten_minutes_as_millisecond:
yarn_restart_job.append(yarn_job.name)
for db_job in db_running_jobs:
if (
db_job["stream_id"] in yarn_restart_job
and (datetime.datetime.now() - db_job["updated_at"]).total_seconds() > ten_minutes_as_millisecond / 1000
):
exception = self._get_exception_by_job(db_job)
unnormal_restart_job.append(" {} -> {} ".format(db_job["stream_id"], exception))
if unnormal_restart_job:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[{}]job: {} 在集群上发生重启".format(self._geog_area_code, unnormal_restart_job),
raise_exception=False,
)
# get the exception details for a job
def _get_exception_by_job(self, db_job):
deploy_mode = db_job["deploy_mode"]
cluster_name = db_job["cluster_name"]
stream_id = db_job["stream_id"]
exception = ""
try:
if deploy_mode == DeployMode.YARN_CLUSTER.value:
schedule_id = stream_id
else:
schedule_id = cluster_name
cluster_is_exist = self._jobnavi_stream_helper.get_schedule(schedule_id)
if cluster_is_exist:
execute_id = self._jobnavi_stream_helper.get_execute_id(schedule_id)
if execute_id:
flink_ex = self._jobnavi_stream_helper.get_job_exceptions(execute_id, stream_id)
if (
flink_ex is not None
and flink_ex["root-exception"] is not None
and flink_ex["root-exception"] != ""
):
ex_timestamp = flink_ex["timestamp"] / 1000
ex_time = datetime.datetime.fromtimestamp(ex_timestamp)
ex_stacktrace = flink_ex["root-exception"]
# report exception msg to user alert
ExceptionAlert.report_exception_metrics(
stream_id, ex_timestamp, ex_stacktrace, self._geog_area_code
)
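                        # Truncate the stack trace to its first 200 characters for the alert message.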
exception = "time: {} exception: {}".format(
ex_time,
ex_stacktrace[0:200],
)
except Exception as e:
logger.exception(e)
return exception
    # Query the status of Spark Structured Streaming (code/zip) jobs
def query_spark_streaming_health(self):
db_running_spark_jobs = MonitorHandler.get_db_spark_streaming_running_job()
unhealth_spark_job_list = []
func_info = []
for spark_job in db_running_spark_jobs:
stream_id = spark_job["stream_id"]
func_info.append(
[
MonitorScheduleHandler(stream_id, self._jobnavi_stream_helper).check_per_spark_streaming_status,
{},
]
)
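        # Call the per-job status checks concurrently, in batches of COMMON_MAX_COROUTINE_NUM.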
for segment in [
func_info[i : i + COMMON_MAX_COROUTINE_NUM] for i in range(0, len(func_info), COMMON_MAX_COROUTINE_NUM)
]:
threads_res = concurrent_call_func(segment)
for segment_index, job_status in enumerate(threads_res):
if job_status:
unhealth_spark_job_list.append(job_status)
return unhealth_spark_job_list
    # Monitor the status of Spark Structured Streaming (code/zip) jobs and alert on unhealthy ones
def check_spark_streaming_health(self):
unhealth_spark_job_list = self.query_spark_streaming_health()
if unhealth_spark_job_list:
send_message(
SYSTEM_ADMINISTRATORS + AIOPS_TASK_ADMINISTRATORS,
"《YARN集群Job监控》",
"[{}] [Spark Structured Streaming Job]: {} 在集群上状态不健康".format(
self._geog_area_code, unhealth_spark_job_list
),
raise_exception=False,
)
    # Query whether Flink code clusters recorded as running are still alive on YARN
def query_flink_streaming_code_health(self, yarn_apps):
db_running_flink_code_stream_ids = []
db_running_flink_code_clusters = MonitorHandler.get_db_flink_code_running_job()
for flink_code in db_running_flink_code_clusters:
db_running_flink_code_stream_ids.append(flink_code["stream_id"])
yarn_running_apps_name = []
for yarn_app in yarn_apps:
yarn_running_apps_name.append(yarn_app["name"])
db_running_flink_code_diff = list(set(db_running_flink_code_stream_ids).difference(set(yarn_running_apps_name)))
return db_running_flink_code_diff
    # Monitor whether Flink code clusters are still alive on YARN and alert when they are missing
def check_flink_streaming_code_health(self, yarn_apps):
unhealth_flink_streaming_code_list = self.query_flink_streaming_code_health(yarn_apps)
if unhealth_flink_streaming_code_list:
send_message(
SYSTEM_ADMINISTRATORS,
"《YARN集群Job监控》",
"[%s] [Flink Streaming Code Job]: %s 在集群上状态不健康"
% (self._geog_area_code, unhealth_flink_streaming_code_list),
raise_exception=False,
)
|
3fe1f21bdce0d726fb80e4159482eab64779a1bf
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-OSLog/PyObjCTest/test_entry.py
|
3bf66525d851f5dcc7bc44f3903478d5cbb35793
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
test_entry.py
|
import OSLog
import objc
from PyObjCTools.TestSupport import TestCase, min_sdk_level
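# Helper implementing the OSLogEntryFromProcess accessors so the tests below can
# check the Objective-C return type encodings registered for these methods.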
class TestEntryHelper(OSLog.NSObject):
def activityIdentifier(self):
return 1
def processIdentifier(self):
return 1
def threadIdentifier(self):
return 1
class TestEntry(TestCase):
def test_enum_types(self):
self.assertIsEnumType(OSLog.OSLogEntryStoreCategory)
def test_constants(self):
self.assertEqual(OSLog.OSLogEntryStoreCategoryUndefined, 0)
self.assertEqual(OSLog.OSLogEntryStoreCategoryMetadata, 1)
self.assertEqual(OSLog.OSLogEntryStoreCategoryShortTerm, 2)
self.assertEqual(OSLog.OSLogEntryStoreCategoryLongTermAuto, 3)
self.assertEqual(OSLog.OSLogEntryStoreCategoryLongTerm1, 4)
self.assertEqual(OSLog.OSLogEntryStoreCategoryLongTerm3, 5)
self.assertEqual(OSLog.OSLogEntryStoreCategoryLongTerm7, 6)
self.assertEqual(OSLog.OSLogEntryStoreCategoryLongTerm14, 7)
self.assertEqual(OSLog.OSLogEntryStoreCategoryLongTerm30, 8)
def test_methods(self):
self.assertResultHasType(TestEntryHelper.activityIdentifier, objc._C_NSUInteger)
self.assertResultHasType(TestEntryHelper.processIdentifier, objc._C_INT)
self.assertResultHasType(TestEntryHelper.threadIdentifier, objc._C_ULNGLNG)
@min_sdk_level("10.15")
def test_protocols(self):
self.assertProtocolExists("OSLogEntryFromProcess")
self.assertProtocolExists("OSLogEntryWithPayload")
|