Dataset column schema (column name, type, and observed value range or number of distinct values; ⌀ marks columns that contain nulls):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (⌀) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
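For working with records of this shape programmatically, the sketch below streams and filters rows with the Hugging Face `datasets` library. It is a minimal sketch, not the canonical loading code for this dataset: the repository path `org/code-corpus` is a hypothetical placeholder, and only column names from the schema above are assumed.

```python
# Minimal sketch, assuming the rows above are hosted as a Hugging Face dataset.
# "org/code-corpus" is a hypothetical placeholder, not the real dataset path.
from datasets import load_dataset

ds = load_dataset("org/code-corpus", split="train", streaming=True)

# Keep permissively licensed, non-vendored, non-generated Python files,
# mirroring the license_type / language / is_vendor / is_generated columns.
def keep(row):
    return (
        row["license_type"] == "permissive"
        and row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
    )

for row in filter(keep, ds):
    print(row["repo_name"], row["path"], row["length_bytes"])
    break  # inspect just the first matching record
```

The sample rows below list each record's cell values in the column order given above, separated by `|` markers.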
a6153be6c8c3ad35199aa7fd63c497340c7a3b10
|
80e55dff40b8454c2ac48a012a6b77b027b84056
|
/setup.py
|
613f7647209eef501ad1aed6e748c56bcbff20d3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
localstack/awscli-local
|
7d0f881f58cf07a6213d899b5d6aa925d6564f55
|
f52aa9e2878651532a9f733ba4c4efdbe9e6b718
|
refs/heads/master
| 2023-08-08T11:29:25.715328
| 2023-07-20T08:50:52
| 2023-07-20T08:50:52
| 96,595,434
| 859
| 85
|
Apache-2.0
| 2023-07-20T08:48:15
| 2017-07-08T04:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
description = 'Thin wrapper around the "aws" command line interface for use with LocalStack'
README = description
try:
with open('README.md') as f:
README = f.read()
except Exception:
pass
if __name__ == '__main__':
setup(
name='awscli-local',
version='0.21',
description=description,
long_description=README,
long_description_content_type='text/markdown',
author='LocalStack Team',
author_email='info@localstack.cloud',
url='https://github.com/localstack/awscli-local',
packages=[],
scripts=['bin/awslocal', 'bin/awslocal.bat'],
package_data={},
data_files={},
install_requires=['localstack-client'],
extras_require={
'ver1': ['awscli'],
},
license="Apache License 2.0",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing"
]
)
|
91f93eca8574d0fc4781e18e69c9e12a41e12933
|
c8b39acfd4a857dc15ed3375e0d93e75fa3f1f64
|
/Engine/Extras/ThirdPartyNotUE/emsdk/emscripten/1.37.19/tools/diff_autodebugger.py
|
0a2a7634f65bac2380d76f064667755fefff36d1
|
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
windystrife/UnrealEngine_NVIDIAGameWorks
|
c3c7863083653caf1bc67d3ef104fb4b9f302e2a
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
refs/heads/4.18-GameWorks
| 2023-03-11T02:50:08.471040
| 2022-01-13T20:50:29
| 2022-01-13T20:50:29
| 124,100,479
| 262
| 179
|
MIT
| 2022-12-16T05:36:38
| 2018-03-06T15:44:09
|
C++
|
UTF-8
|
Python
| false
| false
| 463
|
py
|
diff_autodebugger.py
|
'''
Very simple line-by line diff of autodebugger outputs. useful when there are no added or removed lines,
and there are float differences
'''
import os, sys
f1 = open(sys.argv[1], 'r').readlines()
f2 = open(sys.argv[2], 'r').readlines()
for i in range(len(f1)):
if f1[i] == f2[i]: continue
v1 = float(f1[i].split(',')[1])
v2 = float(f2[i].split(',')[1])
print '%5d %10s %f ' % (i+1, f1[i].split(',')[0], v1-v2), ' ', v1-v2, v1, v2
|
a0d4fa69b80116fa8d3237bf1587373489cb24ba
|
47d69d21f53333d93d5ba9973840ef192808a090
|
/src/tox/util/spinner.py
|
4fb6320a8fb6e946efafb11a03a3232d182649ee
|
[
"MIT"
] |
permissive
|
tox-dev/tox
|
27ce3072e7faf5c88ed5305bbd66359369bba13d
|
da0885cd162fb02de866831a75eca9dcfe87eb36
|
refs/heads/main
| 2023-09-01T11:45:18.097559
| 2023-08-31T14:51:57
| 2023-08-31T14:51:57
| 68,465,360
| 3,512
| 624
|
MIT
| 2023-09-11T20:58:32
| 2016-09-17T16:54:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,870
|
py
|
spinner.py
|
"""A minimal non-colored version of https://pypi.org/project/halo, to track list progress."""
from __future__ import annotations
import os
import sys
import textwrap
import threading
import time
from collections import OrderedDict
from typing import IO, TYPE_CHECKING, NamedTuple, Sequence, TypeVar
from colorama import Fore
if TYPE_CHECKING:
from types import TracebackType
from typing import Any, ClassVar
if sys.platform == "win32": # pragma: win32 cover
import ctypes
class _CursorInfo(ctypes.Structure):
_fields_: ClassVar[list[tuple[str, Any]]] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def _file_support_encoding(chars: Sequence[str], file: IO[str]) -> bool:
encoding = getattr(file, "encoding", None)
if encoding is not None: # pragma: no branch # this should be always set, unless someone passes in something bad
try:
for char in chars:
char.encode(encoding)
except UnicodeEncodeError:
pass
else:
return True
return False
T = TypeVar("T", bound="Spinner")
MISS_DURATION = 0.01
class Outcome(NamedTuple):
ok: str
fail: str
skip: str
class Spinner:
CLEAR_LINE = "\033[K"
max_width = 120
UNICODE_FRAMES: ClassVar[list[str]] = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
ASCII_FRAMES: ClassVar[list[str]] = ["|", "-", "+", "x", "*"]
UNICODE_OUTCOME = Outcome(ok="✔", fail="✖", skip="⚠")
ASCII_OUTCOME = Outcome(ok="+", fail="!", skip="?")
def __init__( # noqa: PLR0913
self,
enabled: bool = True, # noqa: FBT001, FBT002
refresh_rate: float = 0.1,
colored: bool = True, # noqa: FBT001, FBT002
stream: IO[str] | None = None,
total: int | None = None,
) -> None:
self.is_colored = colored
self.refresh_rate = refresh_rate
self.enabled = enabled
stream = sys.stdout if stream is None else stream
self.frames = self.UNICODE_FRAMES if _file_support_encoding(self.UNICODE_FRAMES, stream) else self.ASCII_FRAMES
self.outcome = (
self.UNICODE_OUTCOME if _file_support_encoding(self.UNICODE_OUTCOME, stream) else self.ASCII_OUTCOME
)
self.stream = stream
self.total = total
self.print_report = True
self._envs: dict[str, float] = OrderedDict()
self._frame_index = 0
def clear(self) -> None:
if self.enabled:
self.stream.write("\r")
self.stream.write(self.CLEAR_LINE)
def render(self) -> Spinner:
while True:
self._stop_spinner.wait(self.refresh_rate)
if self._stop_spinner.is_set():
break
self.render_frame()
return self
def render_frame(self) -> None:
if self.enabled:
self.clear()
self.stream.write(f"\r{self.frame()}")
def frame(self) -> str:
frame = self.frames[self._frame_index]
self._frame_index += 1
self._frame_index %= len(self.frames)
total = f"/{self.total}" if self.total is not None else ""
text_frame = f"[{len(self._envs)}{total}] {' | '.join(self._envs)}"
text_frame = textwrap.shorten(text_frame, width=self.max_width - 1, placeholder="...")
return f"{frame} {text_frame}"
def __enter__(self: T) -> T:
if self.enabled:
self.disable_cursor()
self.render_frame()
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
self._spinner_thread.daemon = True
self._spinner_thread.start()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if not self._stop_spinner.is_set(): # pragma: no branch
if self._spinner_thread: # pragma: no branch # hard to test
self._stop_spinner.set()
self._spinner_thread.join()
self._frame_index = 0
if self.enabled:
self.clear()
self.enable_cursor()
def add(self, name: str) -> None:
self._envs[name] = time.monotonic()
def succeed(self, key: str) -> None:
self.finalize(key, f"OK {self.outcome.ok}", Fore.GREEN)
def fail(self, key: str) -> None:
self.finalize(key, f"FAIL {self.outcome.fail}", Fore.RED)
def skip(self, key: str) -> None:
self.finalize(key, f"SKIP {self.outcome.skip}", Fore.YELLOW)
def finalize(self, key: str, status: str, color: str) -> None:
start_at = self._envs.pop(key, None)
if self.enabled:
self.clear()
if self.print_report:
duration = MISS_DURATION if start_at is None else time.monotonic() - start_at
base = f"{key}: {status} in {td_human_readable(duration)}"
if self.is_colored:
base = f"{color}{base}{Fore.RESET}"
base += os.linesep
self.stream.write(base)
def disable_cursor(self) -> None:
if self.stream.isatty():
if sys.platform == "win32": # pragma: win32 cover
ci = _CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle(-11)
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
ci.visible = False
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
else:
self.stream.write("\033[?25l")
def enable_cursor(self) -> None:
if self.stream.isatty():
if sys.platform == "win32": # pragma: win32 cover
ci = _CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle(-11)
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
ci.visible = True
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
else:
self.stream.write("\033[?25h")
_PERIODS = [
("day", 60 * 60 * 24),
("hour", 60 * 60),
("minute", 60),
("second", 1),
]
def td_human_readable(seconds: float) -> str:
texts: list[str] = []
for period_name, period_seconds in _PERIODS:
period_str = None
if period_name == "second" and (seconds >= 0.01 or not texts): # noqa: PLR2004
period_str = f"{seconds:.2f}".rstrip("0").rstrip(".")
elif seconds >= period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
period_str = f"{period_value:.0f}"
if period_str is not None:
texts.append(f"{period_str} {period_name}{'' if period_str == '1' else 's'}")
return " ".join(texts)
|
6ca354b69ea5dd4fa240ee3aedf5e54eec622ce5
|
94c1805df5a09c39159d502f420d19ad54b567fc
|
/runtime/deps/gyp/test/mac/objc-arc/test.gyp
|
59cf0e29ceeb83df3ed598354c44d82571079c57
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tmikov/jscomp
|
9805a5a4d06520549c57380f0df4a1c0aa0dab56
|
83828441cb38ec96603a6a60be06977d4852940a
|
refs/heads/develop
| 2021-01-19T02:56:35.102659
| 2016-04-12T06:19:30
| 2016-04-12T06:19:30
| 36,981,674
| 237
| 13
|
Apache-2.0
| 2018-10-14T09:48:12
| 2015-06-06T13:49:26
|
C
|
UTF-8
|
Python
| false
| false
| 1,128
|
gyp
|
test.gyp
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'make_global_settings': [
['CC', '/usr/bin/clang'],
['CXX', '/usr/bin/clang++'],
],
'targets': [
{
'target_name': 'arc_enabled',
'type': 'static_library',
'sources': [
'c-file.c',
'cc-file.cc',
'm-file.m',
'mm-file.mm',
],
'xcode_settings': {
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'MACOSX_DEPLOYMENT_TARGET': '10.6',
'ARCHS': [ 'x86_64' ], # For the non-fragile objc ABI.
'CLANG_ENABLE_OBJC_ARC': 'YES',
},
},
{
'target_name': 'arc_disabled',
'type': 'static_library',
'sources': [
'c-file.c',
'cc-file.cc',
'm-file-no-arc.m',
'mm-file-no-arc.mm',
],
'xcode_settings': {
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'MACOSX_DEPLOYMENT_TARGET': '10.6',
'ARCHS': [ 'x86_64' ], # For the non-fragile objc ABI.
},
},
],
}
|
82e3554c39ce31f6ef759b0e71ec204cfe86c002
|
8880226d2ca1c9448c44b3e9f21226a58e61ac93
|
/tests/test_conditions.py
|
ccbf3270b952c5daf0ee97807f3d590e9d43a5a6
|
[
"BSD-2-Clause"
] |
permissive
|
cloudtools/awacs
|
2f82958ccc7ba2177492c29c706a5737f19dd2d1
|
c449a9637f01c26e73b827a9f8d5cc7715bbbea2
|
refs/heads/main
| 2023-08-31T00:58:28.636568
| 2023-08-28T05:13:01
| 2023-08-28T05:13:01
| 9,062,692
| 385
| 107
|
BSD-2-Clause
| 2023-08-13T23:21:39
| 2013-03-27T20:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
test_conditions.py
|
import json
import unittest
import awacs.aws as aws
import awacs.s3 as s3
class TestConditions(unittest.TestCase):
def test_for_all_values(self):
c = aws.Condition(
aws.ForAllValuesStringLike(
"dynamodb:requestedAttributes", ["PostDateTime", "Message", "Tags"]
)
)
pd = aws.PolicyDocument(
Statement=[
aws.Statement(
Action=[s3.ListBucket],
Effect=aws.Allow,
Resource=[s3.ARN("myBucket")],
Condition=c,
)
]
)
self.assertEqual(
{
"Statement": [
{
"Action": ["s3:ListBucket"],
"Condition": {
"ForAllValues:StringLike": {
"dynamodb:requestedAttributes": [
"PostDateTime",
"Message",
"Tags",
]
}
},
"Effect": "Allow",
"Resource": ["arn:aws:s3:::myBucket"],
}
]
},
json.loads(pd.to_json()),
)
|
c26a9669e3cf965d11df82c77555aacb63d47b84
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/datamanager/collection/cmdb/processes/host.py
|
7b30514631433ce7fe88a6e4ceb8ab644b46db91
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 5,384
|
py
|
host.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from collection.common.process import BKDFlowProcessor
from collection.common.process_nodes import (
CleanTemplate,
IgniteStorageTemplate,
ProcessTemplate,
)
from collection.conf import constants
class CleanCMDBHostInfoTemplate(CleanTemplate):
template = "clean_of_cmdb_host_info.jinja"
class IgniteStorageCMDBHostInfoTemplate(IgniteStorageTemplate):
template = "storage_of_cmdb_host_info.jinja"
class DataModelCMDBHostInfoTemplate(ProcessTemplate):
template = "datamodel_of_cmdb_host_info.jinja"
class DataModelInstCMDBHostInfoTemplate(ProcessTemplate):
template = "datamodel_inst_of_cmdb_host_info.jinja"
BKDFlowProcessor.regiter_process_template(CleanCMDBHostInfoTemplate)
BKDFlowProcessor.regiter_process_template(IgniteStorageCMDBHostInfoTemplate)
BKDFlowProcessor.regiter_process_template(DataModelCMDBHostInfoTemplate)
BKDFlowProcessor.regiter_process_template(DataModelInstCMDBHostInfoTemplate)
def process_cmdb_host_info():
process_cmdb_host_config = {
"pipeline": [
{
"process_node": "AccessNode",
"process_template": "AccessCustomTemplate",
"process_context": {
"bk_biz_id": constants.BKDATA_BIZ_ID,
"raw_data_name": constants.CMDB_HOST_TABLE_NAME,
"raw_data_alias": constants.CMDB_HOST_TABLE_ALIA,
},
},
{
"process_node": "CleanNode",
"process_template": "CleanCMDBHostInfoTemplate",
"process_context": {
"bk_biz_id": constants.BKDATA_BIZ_ID,
"raw_data_id": "$0.raw_data_id",
"result_table_name": constants.CMDB_HOST_TABLE_NAME,
"result_table_alias": constants.CMDB_HOST_TABLE_ALIA,
},
},
{
"process_node": "IgniteStorageNode",
"process_template": "IgniteStorageCMDBHostInfoTemplate",
"process_context": {
"bk_biz_id": constants.BKDATA_BIZ_ID,
"raw_data_id": "$0.raw_data_id",
"result_table_id": f"{constants.BKDATA_BIZ_ID}_{constants.CMDB_HOST_TABLE_NAME}",
"result_table_name": constants.CMDB_HOST_TABLE_NAME,
"result_table_alias": constants.CMDB_HOST_TABLE_ALIA,
"storage_cluster": constants.DEFAULT_IGNITE_CLUSTER,
},
},
{
"process_node": "AuthProjectDataNode",
"process_template": "SimpleTemplate",
"process_context": {
"project_id": constants.BKPUB_PROJECT_ID,
"bk_biz_id": constants.BKDATA_BIZ_ID,
"result_table_id": f"{constants.BKDATA_BIZ_ID}_{constants.CMDB_HOST_TABLE_NAME}",
},
},
{
"process_node": "DataModelNode",
"process_template": "DataModelCMDBHostInfoTemplate",
"process_context": {
"project_id": constants.BKPUB_PROJECT_ID,
"model_name": constants.CMDB_HOST_DATAMODEL_NAME,
},
},
{
"process_node": "DataModelInstNode",
"process_template": "DataModelInstCMDBHostInfoTemplate",
"process_context": {
"model_id": "$4.model_id",
"project_id": constants.BKPUB_PROJECT_ID,
"bk_biz_id": constants.BKDATA_BIZ_ID,
"input_result_table_id": f"{constants.BKDATA_BIZ_ID}_{constants.CMDB_HOST_TABLE_NAME}",
"table_name": constants.CMDB_HOST_DATAMODEL_TABLE_NAME,
"cluster_name": constants.DEFAULT_IGNITE_CLUSTER,
},
},
]
}
BKDFlowProcessor(process_cmdb_host_config["pipeline"]).build()
|
81345923573492bfc497f8c864e0f0c21baf08ac
|
07f0563f9d53e27032bdcb6393c7f035bf78ef7a
|
/weditor/web/ipyshell-console.py
|
93857c4e9062540ee890209e107a99552fc1c14f
|
[
"MIT"
] |
permissive
|
alibaba/web-editor
|
1fc7d0d6376cc7efa42e66b616c1675628d05f65
|
eb9560c064ce27a87dabc3997740a488a704c37d
|
refs/heads/master
| 2023-08-09T09:33:07.253648
| 2023-07-24T03:10:45
| 2023-07-24T03:10:45
| 84,531,631
| 690
| 168
|
MIT
| 2023-08-09T04:26:41
| 2017-03-10T07:23:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,136
|
py
|
ipyshell-console.py
|
# coding: utf-8
#
# WRT:{quoted output string}
# EOF:{running milliseconds}  end-of-run marker
# DBG:{debug string}
# LNO:{line number} # starts from 0
# Usage:
# python3 {__file__}.py
# >>> print("hello", end="")
# LNO:0
# DBG: 0 print("hello", end="")
# WRT:"hello"
# EOF:1
import contextlib
import linecache
import json
import os
import sys
import traceback
import time
from typing import Union, Any
def exec_code(code: str, globals) -> Union[Any, None]:
try:
ccode = compile(code, "<string>", "eval")
_eval = True
except SyntaxError:
ccode = compile(code, "<string>", "exec")
_eval = False
if _eval:
return eval(ccode, globals)
exec(ccode, globals)
_file_contents = {}
def getline(filename: str, lineno: int) -> str:
"""
Args:
lineno starts from 0
Note:
linecache.getline starts from 1
"""
if os.path.isfile(filename):
return linecache.getline(filename, lineno + 1)
if filename == "<string>":
lines = _file_contents[filename].splitlines()
if lineno < len(lines):
return lines[lineno]
return ''
def gen_tracefunc(trace_filename: str, sys_stdout):
"""
Ref: http://www.dalkescientific.com/writings/diary/archive/2005/04/20/tracing_python_code.html
"""
def _trace(frame, event, arg):
if event == "line":
lineno = frame.f_lineno - 1 # set lineno starts from 0
filename = frame.f_globals.get("__file__")
if filename == trace_filename:
line = getline(filename, lineno).rstrip()
sys_stdout.write("LNO:{}\n".format(lineno))
sys_stdout.write(f"DBG:{lineno:3d} {line}\n")
sys_stdout.flush()
# time.sleep(.5)
return _trace
return _trace
class QuitError(Exception):
""" quit for this program """
@contextlib.contextmanager
def mock_stdout_stderr(prefix="WRT:"):
_stdout = sys.stdout
_stderr = sys.stderr
try:
class MockStdout:
def isatty(self) -> bool:
return False
def write(self, data: str):
try:
if data != "":
_stdout.write(prefix + json.dumps(data) + "\n")
_stdout.flush()
except Exception as e:
raise QuitError("Output exception", str(e))
def flush(self):
_stdout.flush()
sys.stdout = sys.stderr = MockStdout()
yield _stdout, _stderr # lambda s: _stdout.write(s+"\n")
finally:
sys.stdout = _stdout
sys.stderr = _stderr
def stdin_readline():
try:
line = sys.stdin.readline().rstrip()
if line.startswith("\""):
line = json.loads(line)
_file_contents["<string>"] = line
# print(repr(line))
return line
except Exception as e:
raise QuitError("readline", str(e))
def main():
sigint_twice = False
_globals = {
"__file__": "<string>",
"__name__": "__main__",
"os": os,
"sys": sys,
"time": time,
"json": json,
}
with mock_stdout_stderr() as (stdout, stderr):
# preload
import uiautomator2
_globals['uiautomator2'] = uiautomator2
sys.settrace(gen_tracefunc("<string>", stdout))
stdout.write("DBG:Python (pid: {})\n".format(os.getpid()))
while True:
start = None
try:
# Read exec-code from stdin
if stderr.isatty():
stderr.write(">>> ")
stderr.flush()
line = stdin_readline()
start = time.time()
sigint_twice = False
ret = exec_code(line, _globals)
if ret is not None:
print(ret)
except KeyboardInterrupt:
# Cancel running
if sigint_twice:
break
sigint_twice = True
if start:
stdout.write(
"WRT:" +
json.dumps(">>> Catch Signal KeyboardInterrupt\n"))
stdout.write("\n")
# stdout.write("INFO:KeyboardInterrupt catched, twice quit\n")
except QuitError as e:
stdout.write("DBG:{!r}".format(e))
# Read error from stdin
stdout.write("QUIT\n")
break
except:
# Show traceback
# https://docs.python.org/3/library/traceback.html
flines = traceback.format_exc().splitlines(keepends=True)
print(flines[0] +
"".join(flines[5:]).rstrip()) # ignore top 2 stack-frame
finally:
# Code block finished running
millis = 0 if start is None else (time.time() - start) * 1000
stdout.write("EOF:{}\n".format(int(millis)))
stdout.flush()
if __name__ == "__main__":
main()
|
4a7be39356ccb4c9d0fdf2fe920c46ff5959bd02
|
85c668af40853f5ee48fbe8c4045df1a5dd4104e
|
/examples/simulations/drag_chain.py
|
93c097125402b224f8dd5fa95a26f9d582a527e3
|
[
"MIT",
"LicenseRef-scancode-public-domain",
"OFL-1.1"
] |
permissive
|
marcomusy/vedo
|
771db91bca05cda864fc7d1776d9140726676704
|
9a9f7c5e9ebf135e5c745c521c898866e3ede0ef
|
refs/heads/master
| 2023-08-21T12:56:35.545713
| 2023-08-14T14:39:37
| 2023-08-14T14:39:37
| 110,261,047
| 1,419
| 206
|
MIT
| 2023-09-02T18:38:22
| 2017-11-10T15:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 721
|
py
|
drag_chain.py
|
"""Forward kinematics: hover the mouse to drag the chain"""
from vedo import Plotter, versor, Plane, Line
n = 15 # number of points
l = 3 # length of one segment
def func(evt):
if not evt.actor:
return
coords = line.points()
coords[0] = evt.picked3d
for i in range(1, n):
v = versor(coords[i] - coords[i-1])
coords[i] = coords[i-1] + v * l
line.points(coords) # update positions
nodes.points(coords)
plt.render()
surf = Plane(s=[60, 60])
line = Line([l*n/2, 0], [-l*n/2, 0], res=n, lw=12)
nodes= line.clone().c('red3').point_size(15)
plt = Plotter()
plt.add_callback("on mouse move please call", func)
plt.show(surf, line, nodes, __doc__, zoom=1.3)
plt.close()
|
e7566ec5cae91afd108459ff96633fdc8b5cb2bf
|
4a75c54952ea37e0c851043d6102c8310477dca0
|
/examples/example_bulkinsert_json.py
|
292a59184b56a968f0ed13dd394774be1b5ccfab
|
[
"Apache-2.0"
] |
permissive
|
milvus-io/pymilvus
|
32424852acb032c37cde26409bc6fd5db4586210
|
ea9328d01db60d54a01f152c965fd5803904a08c
|
refs/heads/master
| 2023-08-28T02:59:50.780914
| 2023-08-23T11:06:17
| 2023-08-23T11:06:17
| 191,751,505
| 683
| 285
|
Apache-2.0
| 2023-09-14T06:15:16
| 2019-06-13T11:38:34
|
Python
|
UTF-8
|
Python
| false
| false
| 18,184
|
py
|
example_bulkinsert_json.py
|
import random
import json
import time
import os
from minio import Minio
from minio.error import S3Error
from pymilvus import (
connections,
FieldSchema, CollectionSchema, DataType,
Collection,
utility,
BulkInsertState,
)
# This example shows how to:
# 1. connect to Milvus server
# 2. create a collection
# 3. create some json files for bulkinsert operation
# 4. call do_bulk_insert()
# 5. wait data to be consumed and indexed
# 6. search
# To run this example
# 1. start a standalone milvus(version >= v2.2.9) instance locally
# make sure the docker-compose.yml has exposed the minio console:
# minio:
# ......
# ports:
# - "9000:9000"
# - "9001:9001"
# command: minio server /minio_data --console-address ":9001"
#
# 2. pip3 install minio
# Local path to generate JSON files
LOCAL_FILES_PATH = "/tmp/milvus_bulkinsert"
# Milvus service address
_HOST = '127.0.0.1'
_PORT = '19530'
# Const names
_COLLECTION_NAME = 'demo_bulk_insert_json'
_ID_FIELD_NAME = 'id_field'
_VECTOR_FIELD_NAME = 'float_vector_field'
_JSON_FIELD_NAME = "json_field"
_VARCHAR_FIELD_NAME = "varchar_field"
_DYNAMIC_FIELD_NAME = "$meta" # dynamic field, the internal name is "$meta", enable_dynamic_field=True
# minio
DEFAULT_BUCKET_NAME = "a-bucket"
MINIO_ADDRESS = "0.0.0.0:9000"
MINIO_SECRET_KEY = "minioadmin"
MINIO_ACCESS_KEY = "minioadmin"
# Vector field parameter
_DIM = 128
# to generate increment ID
id_start = 1
# Create a Milvus connection
def create_connection():
retry = True
while retry:
try:
print(f"\nCreate connection...")
connections.connect(host=_HOST, port=_PORT)
retry = False
except Exception as e:
print("Cannot connect to Milvus. Error: " + str(e))
print(f"Cannot connect to Milvus. Trying to connect Again. Sleeping for: 1")
time.sleep(1)
print(f"\nList connections:")
print(connections.list_connections())
# Create a collection
def create_collection(has_partition_key: bool):
field1 = FieldSchema(name=_ID_FIELD_NAME, dtype=DataType.INT64, description="int64", is_primary=True, auto_id=False)
field2 = FieldSchema(name=_VECTOR_FIELD_NAME, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM,
is_primary=False)
field3 = FieldSchema(name=_JSON_FIELD_NAME, dtype=DataType.JSON)
# if has partition key, we use this varchar field as partition key field
field4 = FieldSchema(name=_VARCHAR_FIELD_NAME, dtype=DataType.VARCHAR, max_length=256, is_partition_key=has_partition_key)
schema = CollectionSchema(fields=[field1, field2, field3, field4], enable_dynamic_field=True)
if has_partition_key:
collection = Collection(name=_COLLECTION_NAME, schema=schema, num_partitions=10)
else:
collection = Collection(name=_COLLECTION_NAME, schema=schema)
print("\nCollection created:", _COLLECTION_NAME)
return collection
# Test existence of a collection
def has_collection():
return utility.has_collection(_COLLECTION_NAME)
# Drop a collection in Milvus
def drop_collection():
collection = Collection(_COLLECTION_NAME)
collection.drop()
print("\nDrop collection:", _COLLECTION_NAME)
# List all collections in Milvus
def list_collections():
print("\nList collections:")
print(utility.list_collections())
# Create a partition
def create_partition(collection, partition_name):
collection.create_partition(partition_name=partition_name)
print("\nPartition created:", partition_name)
return collection.partition(partition_name)
# Generate a json file with row-based data.
# The json file must contain a root key "rows", its value is a list, each row must contain a value of each field.
# No need to provide the auto-id field "id_field" since milvus will generate it.
# The row-based json file looks like:
# {"rows": [
# {"str_field": "row-based_0", "float_vector_field": [0.190, 0.046, 0.143, 0.972, 0.592, 0.238, 0.266, 0.995]},
# {"str_field": "row-based_1", "float_vector_field": [0.149, 0.586, 0.012, 0.673, 0.588, 0.917, 0.949, 0.944]},
# ......
# ]
# }
def gen_json_rowbased(num, path, partition_name):
global id_start
rows = []
for i in range(num):
rows.append({
_ID_FIELD_NAME: id_start, # id field
_JSON_FIELD_NAME: json.dumps({"Number": id_start, "Name": "book_"+str(id_start)}), # json field
_VECTOR_FIELD_NAME: [round(random.random(), 6) for _ in range(_DIM)], # vector field
_VARCHAR_FIELD_NAME: "{}_{}".format(partition_name, id_start) if partition_name is not None else "description_{}".format(id_start), # varchar field
"dynamic_{}".format(id_start): id_start, # no field matches this value, this value will be put into dynamic field
})
id_start = id_start + 1
data = {
"rows": rows,
}
with open(path, "w") as json_file:
json.dump(data, json_file)
# For row-based files, each file is converted to a task. Each time you can call do_bulk_insert() to insert one file.
# The rootcoord maintains a task list, each idle datanode will receive a task. If no datanode available, the task will
# be put into pending list to wait, the max size of pending list is 32. If new tasks count exceed spare quantity of
# pending list, the do_bulk_insert() method will return error.
# Once a task is finished, the datanode become idle and will receive another task.
#
# By default, the max size of each file is 16GB, this limit is configurable in the milvus.yaml (common.ImportMaxFileSize)
# If a file size is larger than 16GB, the task will fail and you will get error from the "failed_reason" of the task state.
#
# Then, how many segments generated? Let's say the collection's shard number is 2, typically each row-based file
# will be split into 2 segments. So, basically, each task generates segment count is equal to shard number.
# But if a file's data size exceed the segment.maxSize of milvus.yaml, there could be shardNum*2, shardNum*3 segments
# generated, or even more.
def bulk_insert_rowbased(row_count_per_file, file_count, partition_name = None):
# make sure the files folder is created
os.makedirs(name=LOCAL_FILES_PATH, exist_ok=True)
task_ids = []
for i in range(file_count):
data_folder = os.path.join(LOCAL_FILES_PATH, "rows_{}".format(i))
os.makedirs(name=data_folder, exist_ok=True)
file_path = os.path.join(data_folder, "rows_{}.json".format(i))
print("Generate row-based file:", file_path)
gen_json_rowbased(row_count_per_file, file_path, partition_name)
ok, remote_files = upload(data_folder=data_folder)
if ok:
print("Import row-based file:", remote_files)
task_id = utility.do_bulk_insert(collection_name=_COLLECTION_NAME,
partition_name=partition_name,
files=remote_files)
task_ids.append(task_id)
return wait_tasks_competed(task_ids)
# Wait all bulkinsert tasks to be a certain state
# return the states of all the tasks, including failed task
def wait_tasks_to_state(task_ids, state_code):
wait_ids = task_ids
states = []
while True:
time.sleep(2)
temp_ids = []
for id in wait_ids:
state = utility.get_bulk_insert_state(task_id=id)
if state.state == BulkInsertState.ImportFailed or state.state == BulkInsertState.ImportFailedAndCleaned:
print(state)
print("The task", state.task_id, "failed, reason:", state.failed_reason)
continue
if state.state >= state_code:
states.append(state)
continue
temp_ids.append(id)
wait_ids = temp_ids
if len(wait_ids) == 0:
break;
print("Wait {} tasks to be state: {}. Next round check".format(len(wait_ids), BulkInsertState.state_2_name.get(state_code, "unknown")))
return states
# If the state of bulkinsert task is BulkInsertState.ImportCompleted, that means the data file has been parsed and data has been persisted,
# some segments have been created and waiting for index.
# ImportCompleted state doesn't mean the data is queryable, to query the data, you need to wait until the segment is
# indexed successfully and loaded into memory.
def wait_tasks_competed(task_ids):
print("=========================================================================================================")
states = wait_tasks_to_state(task_ids, BulkInsertState.ImportCompleted)
complete_count = 0
for state in states:
if state.state == BulkInsertState.ImportCompleted:
complete_count = complete_count + 1
# print(state)
# if you want to get the auto-generated primary keys, use state.ids to fetch
# print("Auto-generated ids:", state.ids)
print("{} of {} tasks have successfully generated segments, able to be compacted and indexed as normal".format(complete_count, len(task_ids)))
print("=========================================================================================================\n")
return states
# List all bulkinsert tasks, including pending tasks, working tasks and finished tasks.
# the parameter 'limit' is: how many latest tasks should be returned, if the limit<=0, all the tasks will be returned
def list_all_bulk_insert_tasks(collection_name=_COLLECTION_NAME, limit=0):
tasks = utility.list_bulk_insert_tasks(limit=limit, collection_name=collection_name)
print("=========================================================================================================")
print("List bulkinsert tasks with limit", limit)
pending = 0
started = 0
persisted = 0
completed = 0
failed = 0
for task in tasks:
print(task)
if task.state == BulkInsertState.ImportPending:
pending = pending + 1
elif task.state == BulkInsertState.ImportStarted:
started = started + 1
elif task.state == BulkInsertState.ImportPersisted:
persisted = persisted + 1
elif task.state == BulkInsertState.ImportCompleted:
completed = completed + 1
elif task.state == BulkInsertState.ImportFailed:
failed = failed + 1
print("There are {} bulkinsert tasks: {} pending, {} started, {} persisted, {} completed, {} failed"
.format(len(tasks), pending, started, persisted, completed, failed))
print("=========================================================================================================\n")
# Get collection row count.
def get_entity_num(collection):
print("=========================================================================================================")
print("The number of entity:", collection.num_entities)
# Specify an index type
def create_index(collection):
print("Start Creating index IVF_FLAT")
index = {
"index_type": "IVF_FLAT",
"metric_type": "L2",
"params": {"nlist": 128},
}
collection.create_index(_VECTOR_FIELD_NAME, index)
# Load collection data into memory. If collection is not loaded, the search() and query() methods will return error.
def load_collection(collection):
collection.load()
# Release collection data to free memory.
def release_collection(collection):
collection.release()
# ANN search
def search(collection, search_vector, expr = None, consistency_level = "Eventually"):
search_param = {
"expr": expr,
"data": [search_vector],
"anns_field": _VECTOR_FIELD_NAME,
"param": {"metric_type": "L2", "params": {"nprobe": 10}},
"limit": 5,
"output_fields": [_JSON_FIELD_NAME, _VARCHAR_FIELD_NAME, _DYNAMIC_FIELD_NAME],
"consistency_level": consistency_level,
}
print("search..." if expr is None else "hybrid search...")
results = collection.search(**search_param)
print("=========================================================================================================")
result = results[0]
for j, res in enumerate(result):
print(f"\ttop{j}: {res}")
print("\thits count:", len(result))
print("=========================================================================================================\n")
# Delete entities
def delete(collection, ids):
print("=========================================================================================================\n")
print("Delete these entities:", ids)
expr = _ID_FIELD_NAME + " in " + str(ids)
collection.delete(expr=expr)
print("=========================================================================================================\n")
# Retrieve entities
def retrieve(collection, ids):
print("=========================================================================================================")
print("Retrieve these entities:", ids)
expr = _ID_FIELD_NAME + " in " + str(ids)
result = collection.query(expr=expr, output_fields=[_JSON_FIELD_NAME, _VARCHAR_FIELD_NAME, _VECTOR_FIELD_NAME, _DYNAMIC_FIELD_NAME])
for item in result:
print(item)
print("=========================================================================================================\n")
return result
# Upload data files to minio
def upload(data_folder: str,
bucket_name: str=DEFAULT_BUCKET_NAME)->(bool, list):
if not os.path.exists(data_folder):
print("Data path '{}' doesn't exist".format(data_folder))
return False, []
remote_files = []
try:
print("Prepare upload files")
minio_client = Minio(endpoint=MINIO_ADDRESS, access_key=MINIO_ACCESS_KEY, secret_key=MINIO_SECRET_KEY, secure=False)
found = minio_client.bucket_exists(bucket_name)
if not found:
print("MinIO bucket '{}' doesn't exist".format(bucket_name))
return False, []
remote_data_path = "milvus_bulkinsert"
def upload_files(folder:str):
for parent, dirnames, filenames in os.walk(folder):
if parent is folder:
for filename in filenames:
ext = os.path.splitext(filename)
if len(ext) != 2 or (ext[1] != ".json" and ext[1] != ".npy"):
continue
local_full_path = os.path.join(parent, filename)
minio_file_path = os.path.join(remote_data_path, os.path.basename(folder), filename)
minio_client.fput_object(bucket_name, minio_file_path, local_full_path)
print("Upload file '{}' to '{}'".format(local_full_path, minio_file_path))
remote_files.append(minio_file_path)
for dir in dirnames:
upload_files(os.path.join(parent, dir))
upload_files(data_folder)
except S3Error as e:
print("Failed to connect MinIO server {}, error: {}".format(MINIO_ADDRESS, e))
return False, []
print("Successfully upload files: {}".format(remote_files))
return True, remote_files
def main(has_partition_key: bool):
# create a connection
create_connection()
# drop collection if the collection exists
if has_collection():
drop_collection()
# create collection
collection = create_collection(has_partition_key)
# specify an index type
create_index(collection)
# load data to memory
load_collection(collection)
# show collections
list_collections()
# do bulk_insert, wait all tasks finish persisting
row_count_per_file = 100000
if has_partition_key:
# automatically partitioning
bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=2)
else:
# bulklinsert into default partition
bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=1)
# create a partition, bulkinsert into the partition
a_partition = "part_1"
create_partition(collection, a_partition)
bulk_insert_rowbased(row_count_per_file=row_count_per_file, file_count=1, partition_name=a_partition)
# list all tasks
list_all_bulk_insert_tasks()
# get the number of entities
get_entity_num(collection)
print("Waiting index complete and refresh segments list to load...")
utility.wait_for_index_building_complete(_COLLECTION_NAME)
collection.load(_refresh = True)
# pick some entities
pick_ids = [50, row_count_per_file + 99]
id_vectors = retrieve(collection, pick_ids)
# search the picked entities, they are in result at the top0
for id_vector in id_vectors:
id = id_vector[_ID_FIELD_NAME]
vector = id_vector[_VECTOR_FIELD_NAME]
print("Search id:", id, ", compare this id to the top0 of search result, they are equal")
search(collection, vector)
# delete the picked entities
delete(collection, pick_ids)
# search the deleted entities, they are not in result anymore
for id_vector in id_vectors:
id = id_vector[_ID_FIELD_NAME]
vector = id_vector[_VECTOR_FIELD_NAME]
print("Search id:", id, ", compare this id to the top0 result, they are not equal since the id has been deleted")
# here we use Strong consistency level to do search, because we need to make sure the delete operation is applied
search(collection, vector, consistency_level="Strong")
# search by filtering the varchar field
vector = [round(random.random(), 6) for _ in range(_DIM)]
search(collection, vector, expr="{} like \"description_33%\"".format(_VARCHAR_FIELD_NAME))
# release memory
release_collection(collection)
# drop collection
drop_collection()
if __name__ == '__main__':
# change this value if you want to test bulkinsert with partition key
# Note: bulkinsert supports partition key from Milvus v2.2.12
has_partition_key = False
main(has_partition_key)
|
9d093d8935bea3931164ad6611736e88c0e2d663
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/datamanager/collection/templates/tencent/constants.py
|
8072c4f23f35288dc9b9f8ae0f12bb1a97c5915e
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 6,463
|
py
|
constants.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
CMDB_HOST_FIELDS = [
"bk_biz_id",
"bk_relations",
"bk_asset_id",
"bk_bak_operator",
"bk_bs_info",
"bk_cloud_host_status",
"bk_cloud_id",
"bk_cloud_inst_id",
"bk_cloud_vendor",
"bk_comment",
"bk_cpu",
"bk_cpu_mhz",
"bk_cpu_module",
"bk_disk",
"bk_host_id",
"bk_host_innerip",
"bk_host_name",
"bk_host_outerip",
"bk_idc_area",
"bk_idc_area_id",
"bk_inner_equip_id",
"bk_inner_net_idc",
"bk_inner_switch_ip",
"bk_ip_oper_name",
"bk_is_virtual",
"bk_isp_name",
"bk_logic_zone",
"bk_logic_zone_id",
"bk_mac",
"bk_manage_type",
"bk_mem",
"bk_os_bit",
"bk_os_name",
"bk_os_type",
"bk_os_version",
"bk_outer_equip_id",
"bk_outer_mac",
"bk_outer_switch_ip",
"bk_position_name",
"bk_product",
"bk_province_name",
"bk_service_arr",
"bk_sla",
"bk_sn",
"bk_state",
"bk_state_name",
"bk_str_version",
"bk_supplier_account",
"bk_svc_id_arr",
"bk_svr_device_cls_name",
"bk_svr_type_id",
"bk_zone_name",
"classify_level_name",
"clb_vip",
"create_time",
"dbrole",
"dept_name",
"domain",
"group_name",
"hard_memo",
"idc_city_id",
"idc_city_name",
"idc_id",
"idc_name",
"idc_unit_id",
"idc_unit_name",
"import_from",
"inner_network_segment",
"inner_switch_port",
"is_special",
"last_time",
"logic_domain",
"logic_domain_id",
"module_name",
"net_device_id",
"net_struct_id",
"net_struct_name",
"operator",
"outer_network_segment",
"outer_switch_port",
"rack",
"rack_id",
"raid_id",
"raid_name",
"srv_important_level",
"srv_out_band_manage_type",
"srv_status",
"sub_zone",
"sub_zone_id",
"svr_device_class",
"svr_device_id",
"svr_device_type_id",
"svr_device_type_name",
"svr_first_time",
"svr_id",
"svr_input_time",
"svr_out_band_type",
"svr_type_id",
"svr_type_name",
"接入iFix",
]
CMDB_MODULE_FIELDS = [
"bk_bak_operator",
"bk_biz_id",
"bk_db_charset",
"bk_db_ip",
"bk_db_port",
"bk_db_type",
"bk_is_single_point",
"bk_machine_type",
"bk_module_id",
"bk_module_name",
"bk_module_type",
"bk_need_phone_warn",
"bk_parent_id",
"bk_phone_warn_end",
"bk_phone_warn_start",
"bk_set_id",
"bk_supplier_account",
"bk_svr_dns",
"bk_svr_port",
"bk_validity",
"bk_wan_need",
"bs3_name_id",
"create_time",
"creator",
"default",
"host_apply_enabled",
"last_time",
"operator",
"service_category_id",
"service_template_id",
"set_template_id",
]
CMDB_RELATION_FIELDS = [
"bk_biz_id",
"bk_module_id",
"bk_supplier_account",
"bk_host_id",
"bk_set_id",
]
CMDB_SET_FIELDS = [
"IDC",
"TclsWorldID",
"bk_alias_name",
"bk_biz_id",
"bk_capacity",
"bk_category",
"bk_chn_name",
"bk_customer",
"bk_enable_relate_webplat",
"bk_is_gcs",
"bk_open_time",
"bk_operation_state",
"bk_outer_source",
"bk_parent_id",
"bk_platform",
"bk_service_status",
"bk_set_desc",
"bk_set_env",
"bk_set_id",
"bk_set_name",
"bk_supplier_account",
"bk_svc_name",
"bk_system",
"bk_uniq_id",
"bk_world_id",
"create_time",
"creator",
"default",
"last_time",
"set_template_id",
"worldID",
"中文名称",
"大区ID",
]
CMDB_BIZ_FIELDS = [
"bk_biz_tester",
"time_zone",
"bk_biz_developer",
"bk_biz_maintainer",
"operator",
"life_cycle",
"bk_biz_name",
"language",
"bk_biz_productor",
"bs1_name",
"bs2_name",
"bs3_name_id",
"bs1_name_id",
"bs2_name_id",
"bs3_name",
"bk_app_type",
"bk_app_game_typeid",
"bk_dept_name_id",
"bk_source_id",
"bk_oper_plan",
"bk_app_abbr",
"bk_oper_grp_name",
"bk_app_summary",
"bk_app_director",
"bk_app_devteam",
"bk_arc_doc",
"bk_app_user_manual",
"bk_app_oper_manual",
"bk_app_url",
"bk_app_dev_bak",
"bk_app_forum_url",
"bk_bip_app_name",
"bk_bip_id",
"bk_product_name",
"bk_pmp_logo",
"bk_pmp_sens_col",
"bk_pmp_ope_pm",
"bk_pmp_ope_expert",
"bk_pmp_sa",
"bk_dba_bak",
"bk_pmp_dba_major",
"bk_pmp_safe_man",
"bk_pmp_oper_dev_man",
"bk_pmp_oss_man",
"bk_pmp_idip_man",
"bk_pmp_potl_man",
"bk_pmp_cm_man",
"bk_tlog_man",
"bk_pmp_svc_pm",
"bk_pmp_cmreqman",
"bk_pmp_qc",
"bk_pmp_qa",
"bk_pmp_com_plot",
"bk_vaskey_id",
"bk_vipdl_id",
"bk_pmp_group_user",
"bk_is_bip",
"bk_pmp_test_tm",
"bk_tcls_id",
"bk_tcm_id",
"bk_mbl_qq_appid",
"bk_idip_id",
"bk_visitor_appid",
"bk_wechat_appid",
"bk_alarm_rvc_man",
"bk_test_resource",
"bk_operate_dept_id",
"bk_operate_dept_name",
"bk_biz_id",
"create_time",
"last_time",
]
BKDATA_BIZ_ID = 591
|
5d903307091034dcdf7108e1b28507ffcb543619
|
dbe83cf6c2b78a61def862ca19625c2f78268af8
|
/ibis/common/tests/test_temporal.py
|
dce932383aa3cd1fbf9de86f8df028879a492aa9
|
[
"Apache-2.0"
] |
permissive
|
ibis-project/ibis
|
56a169d75805db7dfd39192cf0562521c405ff1c
|
3866492906d731dc170b560e7d7471bd4855169a
|
refs/heads/master
| 2023-09-01T17:07:38.854510
| 2023-09-01T13:52:08
| 2023-09-01T15:32:04
| 34,139,230
| 2,304
| 384
|
Apache-2.0
| 2023-09-14T21:52:21
| 2015-04-17T20:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 7,787
|
py
|
test_temporal.py
|
from __future__ import annotations
import itertools
from datetime import date, datetime, time, timedelta, timezone
import dateutil
import pandas as pd
import pytest
import pytz
from packaging.version import parse as vparse
from pytest import param
from ibis.common.patterns import CoercedTo
from ibis.common.temporal import (
DateUnit,
IntervalUnit,
TimeUnit,
normalize_datetime,
normalize_timedelta,
normalize_timezone,
)
from ibis.conftest import WINDOWS
interval_units = pytest.mark.parametrize(
["singular", "plural", "short"],
[
("year", "years", "Y"),
("quarter", "quarters", "Q"),
("month", "months", "M"),
("week", "weeks", "W"),
("day", "days", "D"),
("hour", "hours", "h"),
("minute", "minutes", "m"),
("second", "seconds", "s"),
("millisecond", "milliseconds", "ms"),
("microsecond", "microseconds", "us"),
("nanosecond", "nanoseconds", "ns"),
],
)
@interval_units
def test_interval_units(singular, plural, short):
u = IntervalUnit[singular.upper()]
assert u.singular == singular
assert u.plural == plural
assert u.short == short
@interval_units
def test_interval_unit_coercions(singular, plural, short):
u = IntervalUnit[singular.upper()]
v = CoercedTo(IntervalUnit)
assert v.match(singular, {}) == u
assert v.match(plural, {}) == u
assert v.match(short, {}) == u
@pytest.mark.parametrize(
("alias", "expected"),
[
("HH24", "h"),
("J", "D"),
("MI", "m"),
("SYYYY", "Y"),
("YY", "Y"),
("YYY", "Y"),
("YYYY", "Y"),
],
)
def test_interval_unit_aliases(alias, expected):
v = CoercedTo(IntervalUnit)
assert v.match(alias, {}) == IntervalUnit(expected)
@pytest.mark.parametrize(
("value", "unit", "expected"),
[
(1, IntervalUnit.DAY, 1),
(1, IntervalUnit.HOUR, 1),
(1, IntervalUnit.MINUTE, 1),
(1, IntervalUnit.SECOND, 1),
(1, IntervalUnit.MILLISECOND, 1),
(1, IntervalUnit.MICROSECOND, 1),
(timedelta(days=1), IntervalUnit.DAY, 1),
(timedelta(hours=1), IntervalUnit.HOUR, 1),
(timedelta(minutes=1), IntervalUnit.MINUTE, 1),
(timedelta(seconds=1), IntervalUnit.SECOND, 1),
(timedelta(milliseconds=1), IntervalUnit.MILLISECOND, 1),
(timedelta(microseconds=1), IntervalUnit.MICROSECOND, 1),
(timedelta(seconds=1, milliseconds=100), IntervalUnit.MILLISECOND, 1100),
(timedelta(seconds=1, milliseconds=21), IntervalUnit.MICROSECOND, 1021000),
],
)
def test_normalize_timedelta(value, unit, expected):
assert normalize_timedelta(value, unit) == expected
@pytest.mark.parametrize(
("value", "unit"),
[
(timedelta(days=1), IntervalUnit.YEAR),
(timedelta(days=1), IntervalUnit.QUARTER),
(timedelta(days=1), IntervalUnit.MONTH),
(timedelta(days=1), IntervalUnit.WEEK),
(timedelta(hours=1), IntervalUnit.DAY),
(timedelta(minutes=1), IntervalUnit.HOUR),
(timedelta(seconds=1), IntervalUnit.MINUTE),
(timedelta(milliseconds=1), IntervalUnit.SECOND),
(timedelta(microseconds=1), IntervalUnit.MILLISECOND),
(timedelta(days=1, microseconds=100), IntervalUnit.MILLISECOND),
],
)
def test_normalize_timedelta_invalid(value, unit):
with pytest.raises(ValueError):
normalize_timedelta(value, unit)
def test_interval_unit_compatibility():
v = CoercedTo(IntervalUnit)
for unit in itertools.chain(DateUnit, TimeUnit):
interval = v.match(unit, {})
assert isinstance(interval, IntervalUnit)
assert unit.value == interval.value
@pytest.mark.parametrize(
("value", "expected"),
[
(None, None),
(pytz.UTC, pytz.UTC),
("UTC", dateutil.tz.tzutc()),
("Europe/Budapest", dateutil.tz.gettz("Europe/Budapest")),
(+2, timezone(timedelta(seconds=7200))),
(-2, timezone(timedelta(seconds=-7200))),
(dateutil.tz.tzoffset(None, 3600), timezone(timedelta(seconds=3600))),
],
)
def test_normalize_timezone(value, expected):
assert normalize_timezone(value) == expected
@pytest.mark.parametrize(
("value", "expected"),
[
# datetime object
(datetime(2017, 1, 1), datetime(2017, 1, 1)),
(datetime(2017, 1, 1, 0, 0, 0, 1), datetime(2017, 1, 1, 0, 0, 0, 1)),
(
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=timezone.utc),
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.UTC),
),
# date object
(datetime(2017, 1, 1).date(), datetime(2017, 1, 1)),
# pandas timestamp object
(pd.Timestamp("2017-01-01"), datetime(2017, 1, 1)),
(pd.Timestamp("2017-01-01 00:00:00.000001"), datetime(2017, 1, 1, 0, 0, 0, 1)),
# pandas timestamp object with timezone
(
pd.Timestamp("2017-01-01 00:00:00.000001+00:00"),
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.UTC),
),
(
pd.Timestamp("2017-01-01 00:00:00.000001+01:00"),
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.tzoffset(None, 3600)),
),
# datetime string
("2017-01-01", datetime(2017, 1, 1)),
("2017-01-01 00:00:00.000001", datetime(2017, 1, 1, 0, 0, 0, 1)),
# datetime string with timezone offset
(
"2017-01-01 00:00:00.000001+00:00",
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.UTC),
),
(
"2017-01-01 00:00:00.000001+01:00",
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.tzoffset(None, 3600)),
),
# datetime string with timezone
(
"2017-01-01 00:00:00.000001 UTC",
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.UTC),
),
(
"2017-01-01 00:00:00.000001 GMT",
datetime(2017, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.UTC),
),
# plain integer
(1000, datetime(1970, 1, 1, 0, 16, 40)),
# floating point
(1000.123, datetime(1970, 1, 1, 0, 16, 40, 123000)),
# time object
(time(0, 0, 0, 1), datetime.combine(date.today(), time(0, 0, 0, 1))),
],
)
def test_normalize_datetime(value, expected):
result = normalize_datetime(value)
assert result == expected
@pytest.mark.parametrize(
("value", "expected"),
[
# timezone naive datetime
(datetime(2017, 1, 1), None),
(datetime(2017, 1, 1, 0, 0, 0, 1), None),
# timezone aware datetime
(datetime(2022, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.UTC), "UTC"),
# timezone aware datetime with timezone offset
(
datetime(2022, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.tzoffset(None, 3600)),
"UTC+01:00",
),
# timezone aware datetime with timezone name
(datetime(2022, 1, 1, 0, 0, 0, 1, tzinfo=dateutil.tz.gettz("CET")), "CET"),
# pandas timestamp with timezone
(pd.Timestamp("2022-01-01 00:00:00.000001+00:00"), "UTC"),
param(
pd.Timestamp("2022-01-01 00:00:00.000001+01:00"),
"UTC+01:00",
marks=pytest.mark.xfail(
vparse(pd.__version__) < vparse("2.0.0") and not WINDOWS,
reason=(
"tzdata is missing in pandas < 2.0.0 due to an incorrect marker "
"in the tzdata package specification that restricts its installation "
"to windows only"
),
),
),
],
)
def test_normalized_datetime_tzname(value, expected):
result = normalize_datetime(value)
assert result.tzname() == expected
|
0ad6ed0ec1b49ef7164b9dfcc1bd1d6c27ab9a49
|
36272b48713793e497c74fe96b94bb389490db42
|
/Python/Algorithms/Maths/decimalToN_base.py
|
e527d4f99954e67c67d0c691148693d521e416ab
|
[
"MIT"
] |
permissive
|
Py-Contributors/AlgorithmsAndDataStructure
|
d044734fdae300f9320680eaf3fc793a4b3f071b
|
6b9b8b0dfe4543ec2ca191a5290415e36ae06dbd
|
refs/heads/master
| 2023-09-02T17:48:28.712631
| 2023-08-15T05:11:48
| 2023-08-15T05:11:48
| 260,388,831
| 660
| 760
|
MIT
| 2023-09-07T02:17:02
| 2020-05-01T05:46:35
|
C++
|
UTF-8
|
Python
| false
| false
| 557
|
py
|
decimalToN_base.py
|
def conversion(value: int, base: int):
data = []
while value >= base:
data.append(int(value % base))
value = int(value / base)
data.append(value)
data.reverse()
for i in range(0, len(data)):
if data[i] > 9:
temp = data[i] - 10
data.remove(data[i])
temp += 65
data.insert(i, chr(temp))
return data
val = int(input("Enter decimal value: "))
bas = int(input("Base you want: "))
d = conversion(val, bas)
print("Converted value is: ")
print(' '.join(map(str, d)))
|
260708b0c248249af199a437c88bfeec4e39d959
|
080db1ae362de6823e7c78ab6071c82e347ce967
|
/tests/integration/hot_reload/exec2/helper.py
|
42ec39ef559cf961affcac3041ac02a054ad2d6e
|
[
"Apache-2.0"
] |
permissive
|
jina-ai/jina
|
c06898dc31dd3de1f917f30305e9460efedf97c3
|
23c7b8c78fc4ad67d16d83fc0c9f0eae9e935e71
|
refs/heads/master
| 2023-08-30T23:04:45.267920
| 2023-08-24T13:49:49
| 2023-08-24T13:49:49
| 240,315,046
| 20,687
| 2,460
|
Apache-2.0
| 2023-09-14T12:30:45
| 2020-02-13T17:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 57
|
py
|
helper.py
|
def get_doc_value():
return 'MyExecutorBeforeReload'
|
ad0ab4eeebb9778736695953b6c35910a70a585a
|
fbdc48c28e54fb33ae4842ef95ff63893902c99a
|
/scripts/examples/10-Arduino-Boards/Nicla-Vision/03-WiFi/dns.py
|
13d1c28646438f8541975ee2dde1de5ef8d3ac0f
|
[
"MIT"
] |
permissive
|
openmv/openmv
|
44d4b79fc8693950a2e330e5e0fd95b5c36e230f
|
8a90e070a88b7fc14c87a00351b9c4a213278419
|
refs/heads/master
| 2023-08-30T20:59:57.227603
| 2023-08-23T16:50:55
| 2023-08-23T16:50:55
| 14,360,940
| 2,150
| 1,226
|
MIT
| 2023-09-14T07:18:15
| 2013-11-13T10:23:44
|
C
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
dns.py
|
# DNS Example
#
# This example shows how to get the IP address for websites via DNS.
import time  # used below by time.sleep_ms()
import network
import usocket
SSID = "" # Network SSID
KEY = "" # Network key
# Init wlan module and connect to network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlan.connect(SSID, KEY)
while not wlan.isconnected():
print('Trying to connect to "{:s}"...'.format(SSID))
time.sleep_ms(1000)
# We should have a valid IP now via DHCP
print("WiFi Connected ", wlan.ifconfig())
print(usocket.getaddrinfo("www.google.com", 80)[0][4])
|
cb04a40518c1967d2a3ea6b15436670e08d91dfb
|
ff9a29eae7234c4477a048c1bf97611ce05e67ba
|
/graphene_django_extras/filters/__init__.py
|
175cac71b88f7fca3f1066e1c8755384b4d1621a
|
[
"MIT"
] |
permissive
|
eamigo86/graphene-django-extras
|
c315a56e238da8edbdcd1a0dd603207ac27705a9
|
209ae496084562f39749c646af2c4fb094f1ebd8
|
refs/heads/master
| 2023-08-22T06:36:56.478466
| 2023-05-08T06:43:08
| 2023-05-08T06:43:08
| 104,230,962
| 431
| 129
|
MIT
| 2023-06-21T17:21:52
| 2017-09-20T15:06:04
|
Python
|
UTF-8
|
Python
| false
| false
| 346
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
from .lookups import (
ALL_LOOKUPS,
BASIC_LOOKUPS,
COMMON_LOOKUPS,
DATE_LOOKUPS,
DATETIME_LOOKUPS,
NUMBER_LOOKUPS,
TIME_LOOKUPS,
)
__all__ = (
"ALL_LOOKUPS",
"BASIC_LOOKUPS",
"COMMON_LOOKUPS",
"NUMBER_LOOKUPS",
"DATETIME_LOOKUPS",
"DATE_LOOKUPS",
"TIME_LOOKUPS",
)
|
4c0757503ca98c91ec0f6bf6e2f70c3de22177b4
|
0577a46d8d28e1fd8636893bbdd2b18270bb8eb8
|
/chromium/third_party/android_crazy_linker/src/tests/generate_test_elf_hash_tables.py
|
30276426ce8f41310d94eb6a31e7c19c400feef4
|
[
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ric2b/Vivaldi-browser
|
388a328b4cb838a4c3822357a5529642f86316a5
|
87244f4ee50062e59667bf8b9ca4d5291b6818d7
|
refs/heads/master
| 2022-12-21T04:44:13.804535
| 2022-12-17T16:30:35
| 2022-12-17T16:30:35
| 86,637,416
| 166
| 41
|
BSD-3-Clause
| 2021-03-31T18:49:30
| 2017-03-29T23:09:05
| null |
UTF-8
|
Python
| false
| false
| 4,273
|
py
|
generate_test_elf_hash_tables.py
|
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple script used to generate the SysV ELF hash table test data"""
import collections
import os
from pylib import source_utils
from pylib import elf_utils
script_name = os.path.basename(__file__)
def ElfHash(name):
"""Compute the ELF hash of a given input string."""
h = 0
for c in name:
h = (h << 4) + ord(c)
g = h & 0xf0000000
h ^= g
h ^= g >> 24
return h & 0xffffffff
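# Added note: this matches the classic SysV formulation
# "if g: h ^= g >> 24; h &= ~g" -- the two steps touch disjoint bit ranges
# (bits 4-7 vs. bits 28-31), so the order is irrelevant, and XOR-ing with a
# zero g is a no-op.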
class ElfHashTable(object):
def __init__(self, num_buckets, symbol_names):
"""Initialize a new SysV ELF hash table instance.
Args:
num_buckets: Number of hash buckets, must be > 0.
symbol_names: List of symbol names.
"""
self.num_buckets_ = num_buckets
self.num_chain_ = len(symbol_names) + 1
self.symbols_ = symbol_names
self.hashes_ = [ElfHash(t) for t in symbol_names]
# Build bucket and chain arrays.
buckets = [0] * num_buckets
chain = [0] * self.num_chain_
for n, symbol in enumerate(self.symbols_):
elf_hash = self.hashes_[n]
bucket_index = elf_hash % num_buckets
idx = buckets[bucket_index]
if idx == 0:
buckets[bucket_index] = n + 1
else:
while chain[idx] != 0:
idx = chain[idx]
chain[idx] = n + 1
self.buckets_ = buckets
self.chain_ = chain
# Generate final string table and symbol offsets.
self.string_table_, self.symbol_offsets_ = \
elf_utils.GenerateStringTable(self.symbols_)
def __str__(self):
"""Dump human-friendly text description for this table."""
out = 'SysV ELF hash table: num_buckets=%d num_chain=%d\n\n' % (
self.num_buckets_, self.num_chain_)
out += 'idx symbol hash bucket chain\n'
out += ' 0 <STN_UNDEF>\n'
for n, symbol in enumerate(self.symbols_):
elf_hash = self.hashes_[n]
bucket_index = elf_hash % self.num_buckets_
out += '%3d %-20s %08x %-3d %d\n' % (
n + 1, symbol, elf_hash, bucket_index, self.chain_[n + 1])
out += '\nBuckets: '
comma = ''
for b in self.buckets_:
out += '%s%d' % (comma, b)
comma = ', '
out += '\n'
return out
def AsCSource(self, variable_prefix, guard_macro_name):
"""Dump the content of this instance."""
out = source_utils.CSourceBeginAutoGeneratedHeader(script_name,
guard_macro_name)
out += source_utils.CSourceForComments(str(self))
out += source_utils.CSourceForConstCharArray(
self.string_table_, 'k%sStringTable' % variable_prefix)
out += '\n'
out += elf_utils.CSourceForElfSymbolListMacro(variable_prefix,
self.symbols_,
self.symbol_offsets_)
out += '\n'
out += elf_utils.CSourceForElfSymbolTable(variable_prefix,
self.symbols_,
self.symbol_offsets_)
out += '\nstatic const uint32_t k%sHashTable[] = {\n' % variable_prefix
out += ' %d, // num_buckets\n' % self.num_buckets_
out += ' %d, // num_chain\n' % self.num_chain_
out += ' // Buckets\n'
out += source_utils.CSourceForIntegerHexArray(self.buckets_, 32)
out += ' // Chain\n'
out += source_utils.CSourceForIntegerHexArray(self.chain_, 32)
out += '};\n'
out += source_utils.CSourceEndAutoGeneratedHeader(script_name,
guard_macro_name)
return out
def main():
# Same data as the one found on the following web page:
#
# https://flapenguin.me/2017/04/24/elf-lookup-dt-hash/
#
# NOTE: The hash values on that page are incorrect, so results differs!!
#
table = ElfHashTable(4, [
'isnan', 'freelocal', 'hcreate_', 'getopt_long_onl', 'endrpcen',
'pthread_mutex_lock', 'isinf', 'setrlimi', 'getspen', 'umoun',
'strsigna', 'listxatt', 'gettyen', 'uselib', 'cfsetispeed'])
print table.AsCSource('TestElf', 'CRAZY_LINKER_ELF_HASH_TABLE_TEST_DATA_H')
if __name__ == "__main__":
main()
|
3690755a21e733312e13b44847ab9b86cfa2c4f4
|
39b021eabbb8e3be1734cf92fd641965a796b0eb
|
/deepchem/models/torch_models/__init__.py
|
618814449e50933f7763535616b47a73aa99d8b2
|
[
"MIT"
] |
permissive
|
deepchem/deepchem
|
066cbf42316b2f6bec0166727e0264a485d5266f
|
ee6e67ebcf7bf04259cf13aff6388e2b791fea3d
|
refs/heads/master
| 2023-09-02T01:32:17.860111
| 2023-08-31T18:49:00
| 2023-08-31T18:49:00
| 43,098,215
| 4,876
| 1,905
|
MIT
| 2023-09-14T19:10:44
| 2015-09-24T23:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
__init__.py
|
# flake8:noqa
import logging
logger = logging.getLogger(__name__)
from deepchem.models.torch_models.torch_model import TorchModel
from deepchem.models.torch_models.modular import ModularTorchModel
from deepchem.models.torch_models.attentivefp import AttentiveFP, AttentiveFPModel
from deepchem.models.torch_models.cgcnn import CGCNN, CGCNNModel
from deepchem.models.torch_models.gat import GAT, GATModel
from deepchem.models.torch_models.gcn import GCN, GCNModel
from deepchem.models.torch_models.infograph import InfoGraphStar, InfoGraphStarModel, InfoGraphEncoder, GINEncoder, InfoGraph, InfoGraphModel
from deepchem.models.torch_models.mpnn import MPNN, MPNNModel
from deepchem.models.torch_models.lcnn import LCNN, LCNNModel
from deepchem.models.torch_models.pagtn import Pagtn, PagtnModel
from deepchem.models.torch_models.mat import MAT, MATModel
from deepchem.models.torch_models.megnet import MEGNetModel
from deepchem.models.torch_models.normalizing_flows_pytorch import NormalizingFlow
from deepchem.models.torch_models.layers import MultilayerPerceptron, CNNModule, CombineMeanStd, WeightedLinearCombo, AtomicConvolution, NeighborList, SetGather, EdgeNetwork, WeaveLayer, WeaveGather, MolGANConvolutionLayer, MolGANAggregationLayer, MolGANMultiConvolutionLayer, MolGANEncoderLayer, EncoderRNN
from deepchem.models.torch_models.cnn import CNN
from deepchem.models.torch_models.attention import ScaledDotProductAttention, SelfAttention
from deepchem.models.torch_models.grover import GroverModel, GroverPretrain, GroverFinetune
from deepchem.models.torch_models.readout import GroverReadout
from deepchem.models.torch_models.dtnn import DTNN, DTNNModel
try:
from deepchem.models.torch_models.dmpnn import DMPNN, DMPNNModel
from deepchem.models.torch_models.gnn import GNN, GNNHead, GNNModular
from deepchem.models.torch_models.pna_gnn import AtomEncoder, BondEncoder, PNALayer, PNAGNN, PNA
from deepchem.models.torch_models.gnn3d import Net3D, InfoMax3DModular
except ModuleNotFoundError as e:
logger.warning(
f'Skipped loading modules with pytorch-geometric dependency, missing a dependency. {e}'
)
try:
from deepchem.models.torch_models.hf_models import HuggingFaceModel
from deepchem.models.torch_models.chemberta import Chemberta
except ModuleNotFoundError as e:
logger.warning(f'Skipped loading modules with transformers dependency. {e}')
|
62c3f39e9197c6cd1e4b4b76525f7d4362a2db73
|
551990e68feda34d2a9173b05cc3a7259f4e8c9a
|
/direct/nn/multidomainnet/multidomainnet.py
|
1b9ac395b2e7e4e135131ef3ce69ced2aa211282
|
[
"Apache-2.0"
] |
permissive
|
NKI-AI/direct
|
a5c1ca0cb75d709b62e94ff76aba361e188d2d59
|
2a4c29342bc52a404aae097bc2654fb4323e1ac8
|
refs/heads/main
| 2023-08-03T11:37:52.941124
| 2023-06-28T14:11:56
| 2023-06-28T14:11:56
| 269,966,010
| 151
| 35
|
Apache-2.0
| 2023-06-28T14:11:58
| 2020-06-06T11:53:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,599
|
py
|
multidomainnet.py
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
from typing import Callable
import torch
import torch.nn as nn
import direct.data.transforms as T
from direct.nn.multidomainnet.multidomain import MultiDomainUnet2d
class StandardizationLayer(nn.Module):
r"""Multi-channel data standardization method. Inspired by AIRS model submission to the Fast MRI 2020 challenge. Given individual coil images :math:`\{x_i\}_{i=1}^{N_c}` and sensitivity coil maps :math:`\{S_i\}_{i=1}^{N_c}` it returns
.. math::
[(x_{\text{sense}}, {x_{\text{res}}}_1), ..., (x_{\text{sense}}, {x_{\text{res}}}_{N_c})]
where :math:`{x_{\text{res}}}_i = xi - S_i \times x_{\text{sense}}` and :math:`x_{\text{sense}} = \sum_{i=1}^{N_c} {S_i}^{*} \times x_i`.
"""
def __init__(self, coil_dim: int = 1, channel_dim: int = -1):
"""Inits :class:`StandardizationLayer`.
Parameters
----------
coil_dim: int
Coil dimension. Default: 1.
channel_dim: int
Channel dimension. Default: -1.
"""
super().__init__()
self.coil_dim = coil_dim
self.channel_dim = channel_dim
def forward(self, coil_images: torch.Tensor, sensitivity_map: torch.Tensor) -> torch.Tensor:
"""Performs forward pass of :class:`StandardizationLayer`.
Parameters
----------
coil_images: torch.Tensor
Coil images tensor.
sensitivity_map: torch.Tensor
Sensitivity maps.
Returns
-------
torch.Tensor
"""
combined_image = T.reduce_operator(coil_images, sensitivity_map, self.coil_dim)
residual_image = combined_image.unsqueeze(self.coil_dim) - T.complex_multiplication(
sensitivity_map, combined_image.unsqueeze(self.coil_dim)
)
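# Added note: each coil contributes [combined, residual] stacked along the
# channel/complex dimension, so the standardized output carries 4 channels per
# coil (2 complex channels each), matching in_channels=4 in MultiDomainNet below.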
concat = torch.cat(
[
torch.cat([combined_image, residual_image.select(self.coil_dim, idx)], self.channel_dim).unsqueeze(
self.coil_dim
)
for idx in range(coil_images.size(self.coil_dim))
],
self.coil_dim,
)
return concat
class MultiDomainNet(nn.Module):
"""Feature-level multi-domain module.
Inspired by AIRS Medical submission to the Fast MRI 2020 challenge.
"""
def __init__(
self,
forward_operator: Callable,
backward_operator: Callable,
standardization: bool = True,
num_filters: int = 16,
num_pool_layers: int = 4,
dropout_probability: float = 0.0,
**kwargs,
):
"""Inits :class:`MultiDomainNet`.
Parameters
----------
forward_operator: Callable
Forward Operator.
backward_operator: Callable
Backward Operator.
standardization: bool
If True standardization is used. Default: True.
num_filters: int
Number of filters for the :class:`MultiDomainUnet` module. Default: 16.
num_pool_layers: int
Number of pooling layers for the :class:`MultiDomainUnet` module. Default: 4.
dropout_probability: float
Dropout probability for the :class:`MultiDomainUnet` module. Default: 0.0.
"""
super().__init__()
self.forward_operator = forward_operator
self.backward_operator = backward_operator
self._coil_dim = 1
self._complex_dim = -1
self._spatial_dims = (2, 3)
if standardization:
self.standardization = StandardizationLayer(self._coil_dim, self._complex_dim)
self.unet = MultiDomainUnet2d(
forward_operator,
backward_operator,
in_channels=4 if standardization else 2, # if standardization, in_channels is 4 due to standardized input
out_channels=2,
num_filters=num_filters,
num_pool_layers=num_pool_layers,
dropout_probability=dropout_probability,
)
def _compute_model_per_coil(self, model: nn.Module, data: torch.Tensor) -> torch.Tensor:
"""Computes model per coil.
Parameters
----------
model: nn.Module
Model to compute.
data: torch.Tensor
Data to pass in the model.
Returns
-------
output: torch.Tensor
"""
output = []
for idx in range(data.size(self._coil_dim)):
subselected_data = data.select(self._coil_dim, idx)
output.append(model(subselected_data))
output = torch.stack(output, dim=self._coil_dim)
return output
def forward(self, masked_kspace: torch.Tensor, sensitivity_map: torch.Tensor) -> torch.Tensor:
"""Performs forward pass of :class:`MultiDomainNet`.
Parameters
----------
masked_kspace: torch.Tensor
Masked k-space of shape (N, coil, height, width, complex=2).
sensitivity_map: torch.Tensor
Sensitivity map of shape (N, coil, height, width, complex=2).
Returns
-------
output_image: torch.Tensor
Multi-coil output image of shape (N, coil, height, width, complex=2).
"""
input_image = self.backward_operator(masked_kspace, dim=self._spatial_dims)
if hasattr(self, "standardization"):
input_image = self.standardization(input_image, sensitivity_map)
output_image = self._compute_model_per_coil(self.unet, input_image.permute(0, 1, 4, 2, 3)).permute(
0, 1, 3, 4, 2
)
return output_image
|
a1c26196d51027ed56a82eb3e5c6740b1e000b8c
|
7162c7fa1433f8bacc666e611241b32232ef3792
|
/src/interrogate/config.py
|
250f8d29c6228c2f2613e6b11721024c0d4aa57b
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
econchick/interrogate
|
ff4e04e9a4a677a8dd694599e39705f80a5c3ad9
|
1e74611fc5296b0572b6bb11b480d43242c4ec49
|
refs/heads/master
| 2023-04-11T11:54:42.297583
| 2022-07-29T16:12:10
| 2022-07-29T16:12:10
| 258,385,030
| 497
| 49
|
MIT
| 2023-09-10T13:43:00
| 2020-04-24T02:33:25
|
Python
|
UTF-8
|
Python
| false
| false
| 7,863
|
py
|
config.py
|
# Copyright 2020 Lynn Root
"""
Configuration-related helpers.
"""
# Adapted from Black https://github.com/psf/black/blob/master/black.py.
import configparser
import os
import pathlib
import attr
import click
import toml
# TODO: idea: break out InterrogateConfig into two classes: one for
# running the tool, one for reporting the results
@attr.s
class InterrogateConfig:
"""Configuration related to interrogating a given codebase.
:param bool color: Highlight verbose output with color.
:param fail_under: Fail when coverage % is less than a given amount.
:type fail_under: `int` or `float`
:param str ignore_regex: Regex identifying class, method, and
function names to ignore.
:param bool ignore_magic: Ignore all magic methods of classes.
:param bool ignore_module: Ignore module-level docstrings.
:param bool ignore_private: Ignore private classes, methods, and
functions starting with two underscores.
:param bool ignore_semiprivate: Ignore semiprivate classes, methods,
and functions starting with a single underscore.
:param bool ignore_init_method: Ignore ``__init__`` method of
classes.
:param bool ignore_nested_functions: Ignore nested functions and
methods.
:param bool ignore_init_module: Ignore ``__init__.py`` modules.
:param str include_regex: Regex identifying class, method, and
function names to include.
:param bool omit_covered_files: Omit reporting files that have 100%
documentation coverage.
"""
color = attr.ib(default=False)
fail_under = attr.ib(default=80.0)
ignore_regex = attr.ib(default=False)
ignore_magic = attr.ib(default=False)
ignore_module = attr.ib(default=False)
ignore_private = attr.ib(default=False)
ignore_semiprivate = attr.ib(default=False)
ignore_init_method = attr.ib(default=False)
ignore_init_module = attr.ib(default=False)
ignore_nested_classes = attr.ib(default=False)
ignore_nested_functions = attr.ib(default=False)
ignore_property_setters = attr.ib(default=False)
ignore_property_decorators = attr.ib(default=False)
include_regex = attr.ib(default=False)
omit_covered_files = attr.ib(default=False)
def find_project_root(srcs):
"""Return a directory containing .git, .hg, or pyproject.toml.
That directory can be one of the directories passed in `srcs` or their
common parent.
If no directory in the tree contains a marker that would specify it's the
project root, the root of the file system is returned.
"""
if not srcs:
return pathlib.Path("/").resolve()
common_base = min(pathlib.Path(src).resolve() for src in srcs)
if common_base.is_dir():
# Append a fake file so `parents` below returns `common_base_dir`, too.
common_base /= "fake-file"
for directory in common_base.parents:
if (directory / ".git").exists():
return directory
if (directory / ".hg").is_dir():
return directory
if (directory / "pyproject.toml").is_file():
return directory
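# Added note: if no marker is found, the trailing return (outside the loop)
# yields the last ancestor visited -- the filesystem root -- as the docstring
# above states.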
return directory
def find_project_config(path_search_start):
"""Find the absolute filepath to a pyproject.toml if it exists."""
project_root = find_project_root(path_search_start)
pyproject_toml = project_root / "pyproject.toml"
if pyproject_toml.is_file():
return str(pyproject_toml)
setup_cfg = project_root / "setup.cfg"
return str(setup_cfg) if setup_cfg.is_file() else None
def parse_pyproject_toml(path_config):
"""Parse ``pyproject.toml`` file and return relevant parts for Interrogate.
:param str path_config: Path to ``pyproject.toml`` file.
:return: Dictionary representing configuration for Interrogate.
:rtype: dict
:raise OSError: an I/O-related error when opening ``pyproject.toml``.
:raise toml.TomlDecodeError: unable to load ``pyproject.toml``.
"""
pyproject_toml = toml.load(path_config)
config = pyproject_toml.get("tool", {}).get("interrogate", {})
return {
k.replace("--", "").replace("-", "_"): v for k, v in config.items()
}
def sanitize_list_values(value):
"""Parse a string of list items to a Python list.
This is super hacky...
:param str value: string-representation of a Python list
:return: List of strings
:rtype: list
"""
if not value:
return []
if value.startswith("["):
value = value[1:]
if value.endswith("]"):
value = value[:-1]
if not value:
return []
raw_values = [v.strip() for v in value.split(",")]
return [v.strip('"') for v in raw_values]
def parse_setup_cfg(path_config):
"""Parse ``setup.cfg`` file and return relevant parts for Interrogate.
This is super hacky...
:param str path_config: Path to ``setup.cfg`` file.
:return: Dictionary representing configuration for Interrogate.
:rtype: dict
:raise OSError: an I/O-related error when opening ``setup.cfg``.
:raise configparser.ConfigParser: unable to load ``setup.cfg``.
"""
cfg = configparser.ConfigParser()
cfg.read(path_config)
try:
interrogate_section = cfg["tool:interrogate"]
except KeyError:
return None
keys_for_list_values = ["whitelist_regex", "ignore_regex", "exclude"]
raw_config = dict(interrogate_section.items())
config = {
k.replace("--", "").replace("-", "_"): v for k, v in raw_config.items()
}
for k, v in config.items():
if k in keys_for_list_values:
config[k] = sanitize_list_values(v)
elif v.lower() == "false":
config[k] = False
elif v.lower() == "true":
config[k] = True
return config
def read_config_file(ctx, param, value):
"""Inject config from ``pyproject.toml`` or ``setup.py`` into ``ctx``.
These override option defaults, but still respect option values
provided via the CLI.
:param click.Context ctx: click command context.
:param click.Parameter param: click command parameter (in this case,
``config`` from ``-c|--config``).
:param str value: path to ``pyproject.toml`` or ``setup.cfg`` file.
:return: path to ``pyproject.toml`` or ``setup.cfg`` file.
:rtype: str
:raise click.FileError: if ``pyproject.toml`` or ``setup.cfg`` is not
parseable or otherwise not available (i.e. does not exist).
"""
if not value:
paths = ctx.params.get("paths")
if not paths:
paths = (os.path.abspath(os.getcwd()),)
value = find_project_config(paths)
if value is None:
return None
config = None
if value.endswith(".toml"):
try:
config = parse_pyproject_toml(value)
except (toml.TomlDecodeError, OSError) as e:
raise click.FileError(
filename=value,
hint="Error reading configuration file: {}".format(e),
)
elif value.endswith(".cfg"):
try:
config = parse_setup_cfg(value)
except configparser.ParsingError as e:
raise click.FileError(
filename=value,
hint="Error reading configuration file: {}".format(e),
)
if not config:
return None
if ctx.default_map is None:
ctx.default_map = {}
# for backwards compatibility. before 1.1.3, only one regex was allowed.
# with 1.1.3+, multiple regexes can be provided, but we want to honor
# those that configured their pyproject.toml to be a single regex
# string (since now we're expecting a list of strings).
if "ignore_regex" in config:
if isinstance(config["ignore_regex"], str):
config["ignore_regex"] = [config["ignore_regex"]]
ctx.default_map.update(config)
return value
|
cfe94f1800d9acdd7cefa2cf37842be9f3f0aadb
|
7f59e2c4e771c19378e9839406c220d3985e7efe
|
/python-toolbox/marvin_python_toolbox/utils/misc.py
|
5f4d01b33ae8b448f8c517697cfbba2f45516fef
|
[
"Apache-2.0"
] |
permissive
|
apache/incubator-marvin
|
c6ff32d50eb01ccd84266587d79f562a9e371496
|
58fdccf2e677041a13966ddbdd96d484edf3b474
|
refs/heads/develop
| 2023-08-30T12:46:56.973102
| 2022-11-18T15:27:52
| 2022-11-18T15:27:52
| 148,087,939
| 112
| 77
|
Apache-2.0
| 2023-03-07T05:45:59
| 2018-09-10T02:27:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,459
|
py
|
misc.py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright [2020] [Apache Software Foundation]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import tarfile
import wget
import glob
import pickle
import datetime
import time
import shutil
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
from .log import get_logger
logger = get_logger('misc')
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def package_to_name(package):
#remove marvin_ substring
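# e.g. package_to_name("marvin_my_engine") -> "my_engine"
# (illustrative; assumes the "marvin_" prefix is present)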
return package[len("marvin_"):]
def name_to_package(name):
return "marvin_{}".format(name)
def generate_engine_package(package, path, dest=None):
filename = package + "-" + get_version(package, path) + ".tar.gz"
output = os.path.join('/tmp/marvin', filename)
make_tarfile(output, path)
if dest is not None:
move_dest = os.path.join(dest, filename)
else:
move_dest = os.path.join(path, "docker", "develop" ,"daemon", filename)
shutil.move(output, move_dest)
def get_version(package, path):
with open(os.path.join(path, package ,"VERSION"), 'rb') as f:
version = f.read().decode('ascii').strip()
return version
def package_folder(input, output):
with tarfile.open(output, "w:gz") as tar:
tar.add(input, arcname=os.path.basename(input))
def extract_folder(input, output):
tf = tarfile.open(input)
tf.extractall(output)
def call_logs(engine):
container_name = 'marvin-cont-' + engine
p_return = subprocess.Popen(['docker', 'logs', '--follow', container_name], stdout=subprocess.PIPE)
return p_return
def create_or_return_tmp_dir():
tmp_path = '/tmp/marvin'
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
return tmp_path
def write_tmp_info(key, info):
_filepath = os.path.join(create_or_return_tmp_dir(), key)
logger.info("Creating {0}...".format(key))
with open(_filepath, 'w') as f:
f.write(info)
def retrieve_tmp_info(key):
_filepath = os.path.join(create_or_return_tmp_dir(), key)
logger.info("Retriving {0}...".format(key))
try:
with open(_filepath, 'r') as f:
info = f.read()
return info
except:
return None
def get_executor_path_or_download(executor_url):
#get filename from url
_executor_name = executor_url.split('/').pop(-1)
executor_path = os.path.join(os.environ['MARVIN_DATA_PATH'], _executor_name)
if not os.path.exists(executor_path):
logger.info("Downloading engine executor in {0}...".format(executor_path))
wget.download(executor_url, out=executor_path)
return executor_path
def generate_timestamp():
return datetime.datetime.now().timestamp()
def create_tmp_marvin_folder():
_dir = '/tmp/marvin'
if not os.path.exists(_dir):
os.makedirs(_dir)
def get_chunk_and_untar(bits, output_path):
_dir = '/tmp/marvin'
_tmp_path = os.path.join(_dir, 'tmp_data')
#save tar in tmp file
with open(_tmp_path, 'wb') as f:
for chunk in bits:
f.write(chunk)
f.close()
#extract files
with tarfile.open(_tmp_path) as tf:
tf.extractall(output_path)
tf.close()
#remove tmp_data
os.remove(_tmp_path)
def get_tar_data(source, folder, compress):
_dir = '/tmp/marvin'
tmp_path = os.path.join(_dir, 'tmp_data')
_tar_mode = "w:gz" if compress else "w"
#save tar in tmp file
with tarfile.open(tmp_path, _tar_mode) as tf:
if folder:
tf.add(source, arcname='.')
else:
tf.add(source, arcname=os.path.basename(source))
#get bytes from file
with open(tmp_path, 'rb') as bf:
temp_bytes = bf.read()
return (temp_bytes, tmp_path)
def generate_keys(engine_name):
_key_path = os.path.join(os.environ['MARVIN_DATA_PATH'],
'.keys',
engine_name)
os.makedirs(_key_path)
pvk_path = os.path.join(_key_path, 'id_rsa')
pubk_path = os.path.join(_key_path, 'id_rsa.pub')
key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=2048
)
private_key = key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption()
)
public_key = key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
open(pubk_path ,"w").write(public_key.decode("utf-8"))
open(pvk_path ,"w").write(private_key.decode("utf-8"))
os.chmod(pvk_path, 0o500)
return pubk_path
def init_port_forwarding(engine_name, remote_host, ports_list, background=True):
if remote_host != 'localhost' and remote_host != '127.0.0.1':
pkey_path = os.path.join(os.environ['MARVIN_DATA_PATH'], '.keys', engine_name, 'id_rsa')
command_list = ["ssh"]
command_list.append("-o")
command_list.append("StrictHostKeyChecking=no")
command_list.append("-N")
if background:
command_list.append('-f')
for remote_port in ports_list:
command_list.append("-L")
command_list.append("localhost:{0}:localhost:{0}".format(remote_port))
command_list.append("-i")
command_list.append("{0}".format(pkey_path))
command_list.append("marvin@{0}".format(remote_host))
command_list.append("-p")
command_list.append("2022")
if not background:
logger.info("Press Ctrl+C to disable port forwarding")
os.system(" ".join(command_list))
|
ac935b1ed0114bb23981d9e0732f9f6e262454d3
|
6a31e1996338d221e62ba7a14d0c182aed681298
|
/taskbank/lib/models/cycle_siamese_nets.py
|
1a8404740147501d872bb5304d39d700d0bb0abd
|
[
"MIT"
] |
permissive
|
StanfordVL/taskonomy
|
74ae0c95f0780ac7494f48e1a5fa5e01ec5139ae
|
9f814867b5fe4165860862211e8e99b0f200144d
|
refs/heads/master
| 2022-12-09T19:06:18.313618
| 2021-10-22T00:04:41
| 2021-10-22T00:04:41
| 114,683,091
| 847
| 159
|
MIT
| 2022-12-08T01:00:21
| 2017-12-18T20:08:09
|
Python
|
UTF-8
|
Python
| false
| false
| 15,492
|
py
|
cycle_siamese_nets.py
|
'''Standard Siamese model
The Siamese Network takes input as a list of image (a list of ndarray)
Model-specific config.py options: (inherits from models.base_net):
'batch_size': An int. The number of input bundle to use in a batch
'num_input': An int. The number of images within an input bundle
'hidden_size': An int. The size of representation size before FC layer
'output_size': For discriminative task, the size of output.
Encoder:
'encoder': A function that will build take 'input_placeholder', 'is_training', 'hidden_size', and returns a representation.
-'encoder_kwargs': A Dict of all args to pass to 'encoder'.
'''
from __future__ import absolute_import, division, print_function
from functools import partial
from models.siamese_nets import StandardSiamese
import losses.all as losses_lib
import tensorflow as tf
import tensorflow.contrib.slim as slim
from models.sample_models import *
from models.resnet_v1 import *
import optimizers.train_steps as train_steps
import optimizers.ops as optimize
import pdb
import numpy as np
class CycleSiamese(StandardSiamese):
'''
'''
def __init__(self, global_step, cfg):
'''
Args:
cfg: Configuration.
'''
super(CycleSiamese, self).__init__(global_step, cfg)
self.cfg = cfg
if 'hidden_size' not in cfg:
raise ValueError("config.py for Siamese Network must specify 'hidden_size'")
if 'num_input' not in cfg:
raise ValueError("config.py for Siamese Network must specify 'num_input'")
if 'encoder' not in cfg:
raise ValueError("config.py for Siamese Network must specify 'encoder'")
if 'metric_net' not in cfg:
raise ValueError("config.py for Siamese Network must specify 'metric_net'")
# Normalize for fixated relative camera pose
import pickle
import os
with open(os.path.join(cfg['root_dir'], 'lib/data/camera_mean_and_std.pkl'), 'rb') as fp:
data = pickle.load(fp)
self.std = np.asarray([ 10.12015407, 8.1103528, 1.09171896, 1.21579016, 0.26040945, 10.05966329])
self.mean = np.asarray([ -2.67375523e-01, -1.19147040e-02, 1.14497274e-02, 1.10903410e-03, 2.10509948e-02, -4.02013549e+00])
self.mean = tf.cast(tf.stack(self.mean), cfg['target_dtype'])
self.std = tf.cast(tf.stack(self.std), cfg['target_dtype'])
if 'loss_threshold' in cfg:
self.threshold = tf.constant(cfg['loss_threshold'])
else:
self.threshold = None
self.cycle_rate = 0.5
def build_siamese_output_postprocess(self, encoder_output, is_training, scope=None):
'''Build the post-processing on top of the siamese network output.
The default approach is a three-layer fully connected network.
Args:
encoder_output: a list of tensors of output representations of each input image
is_training: flag for whether the model is in training mode.
Returns:
final_output: final output for the whole model
'''
metric_kwargs = {}
if 'metric_kwargs' in self.cfg:
metric_kwargs = self.cfg['metric_kwargs']
else:
raise ValueError("config.py for Siamese Network must specify 'metric_kwargs'")
if scope is not None:
metric_kwargs['scope'] = scope
concat_output = tf.concat(values=encoder_output,axis=1)
final_output, end_points = self.cfg['metric_net'](
concat_output,
is_training,
**metric_kwargs)
self.metric_endpoints = end_points
return final_output
def denormalize_fixated_camera_pose(self, fixated_camera_pose):
return fixated_camera_pose * self.std + self.mean
def normalize_fixated_camera_pose(self, fixated_camera_pose):
return (fixated_camera_pose - self.mean) / self.std
def euler2mat(self, rotation):
angle1 = tf.slice(rotation, [0,0], [self.cfg['batch_size'], 1])
angle2 = tf.slice(rotation, [0,1], [self.cfg['batch_size'], 1])
angle3 = tf.slice(rotation, [0,2], [self.cfg['batch_size'], 1])
c1 = tf.cos(angle1)
c2 = tf.cos(angle2)
c3 = tf.cos(angle3)
s1 = tf.sin(angle1)
s2 = tf.sin(angle2)
s3 = tf.sin(angle3)
cc = c1*c3
cs = c1*s3
sc = s1*c3
ss = s1*s3
m11 = c2*c3
m12 = s2*sc-cs
m13 = s2*cc+ss
m21 = c2*s3
m22 = s2*ss+cc
m23 = s2*cs-sc
m31 = -s2
m32 = c2*s1
m33 = c2*c1
m1 = tf.concat([m11,m12,m13], 1)
m2 = tf.concat([m21,m22,m23], 1)
m3 = tf.concat([m31,m32,m33], 1)
m = tf.stack([m1,m2,m3], axis=1)
return m
def atan2(self, y, x, epsilon=1.0e-12):
# Add a small number to all zeros, to avoid division by zero:
x = tf.where(tf.equal(x, 0.0), x+epsilon, x)
y = tf.where(tf.equal(y, 0.0), y+epsilon, y)
angle = tf.where(tf.greater(x,0.0), tf.atan(y/x), tf.zeros_like(x))
angle = tf.where(tf.logical_and(tf.less(x,0.0), tf.greater_equal(y,0.0)), tf.atan(y/x) + np.pi, angle)
angle = tf.where(tf.logical_and(tf.less(x,0.0), tf.less(y,0.0)), tf.atan(y/x) - np.pi, angle)
angle = tf.where(tf.logical_and(tf.equal(x,0.0), tf.greater(y,0.0)), 0.5*np.pi * tf.ones_like(x), angle)
angle = tf.where(tf.logical_and(tf.equal(x,0.0), tf.less(y,0.0)), -0.5*np.pi * tf.ones_like(x), angle)
angle = tf.where(tf.logical_and(tf.equal(x,0.0), tf.equal(y,0.0)), tf.zeros_like(x), angle)
return angle
def mat2euler(self, rotation_matrix):
m11 = tf.slice(rotation_matrix, [0,0,0], [self.cfg['batch_size'], 1, 1])
m12 = tf.slice(rotation_matrix, [0,0,1], [self.cfg['batch_size'], 1, 1])
m13 = tf.slice(rotation_matrix, [0,0,2], [self.cfg['batch_size'], 1, 1])
m21 = tf.slice(rotation_matrix, [0,1,0], [self.cfg['batch_size'], 1, 1])
m22 = tf.slice(rotation_matrix, [0,1,1], [self.cfg['batch_size'], 1, 1])
m23 = tf.slice(rotation_matrix, [0,1,2], [self.cfg['batch_size'], 1, 1])
m31 = tf.slice(rotation_matrix, [0,2,0], [self.cfg['batch_size'], 1, 1])
m32 = tf.slice(rotation_matrix, [0,2,1], [self.cfg['batch_size'], 1, 1])
m33 = tf.slice(rotation_matrix, [0,2,2], [self.cfg['batch_size'], 1, 1])
cy = tf.sqrt(m11*m11+m21*m21)
eps4 = 4.0e-16
ax = tf.where(tf.greater(cy,eps4), self.atan2(m32,m33), self.atan2(-m23,m22))
ay = self.atan2(-m31,cy)
az = tf.where(tf.greater(cy,eps4), self.atan2(m21,m11), tf.zeros_like(ay))
rotation = tf.concat([ax,ay,az], 1)
return tf.squeeze(rotation, axis=-1)
def calculate_combined_relative_camera_pose(self, pose1, pose2):
rotation1 = tf.slice(pose1, [0,0], [self.cfg['batch_size'], 3])
translation1 = tf.slice(pose1, [0,3], [self.cfg['batch_size'], 3])
rotation2 = tf.slice(pose2, [0,0], [self.cfg['batch_size'], 3])
translation2 = tf.slice(pose2, [0,3], [self.cfg['batch_size'], 3])
rotation_matrix_1 = self.euler2mat(rotation1)
rotation_matrix_2 = self.euler2mat(rotation2)
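# Added note: the composed pose applies pose1 first and then pose2, i.e.
# R = R2 @ R1 and t = R2 @ t1 + t2, which the next statements compute.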
translation = tf.squeeze(tf.matmul(rotation_matrix_2,
tf.expand_dims(translation1, -1))) + translation2
rotation = self.mat2euler(tf.matmul(rotation_matrix_2, rotation_matrix_1))
pose = tf.concat([rotation, translation], 1)
return pose
def build_model(self, input_imgs, is_training, targets, masks=None, privileged_input=None):
'''Builds the model. Assumes that the input is from range [0, 1].
Args:
input_imgs: list of input images (scaled between -1 and 1) with the
dimensions specified in the cfg
is_training: flag for whether the model is in training mode or not
mask: mask used for computing sum of squares loss. If None, we assume
it is np.ones.
'''
print('building model')
cfg = self.cfg
self.is_training = is_training
if self.decoder_only:
encoder_output = input_imgs # Assume that the input is the representation
else:
encoder_output = self.build_encoder(input_imgs, is_training)
final_output_12 = self.build_siamese_output_postprocess(encoder_output, is_training, scope="three_layer_fc_network12")
final_output_23 = self.build_siamese_output_postprocess(encoder_output, is_training, scope="three_layer_fc_network23")
final_output_13 = self.calculate_combined_relative_camera_pose(
self.denormalize_fixated_camera_pose(final_output_12),
self.denormalize_fixated_camera_pose(final_output_23))
final_output_13 = self.normalize_fixated_camera_pose(final_output_13)
#final_output = tf.concat(1, [final_output_12, final_output_13, final_output_23])
target12 = tf.slice(targets, [0,0], [self.cfg['batch_size'], 6])
target13 = tf.slice(targets, [0,6], [self.cfg['batch_size'], 6])
target23 = tf.slice(targets, [0,12], [self.cfg['batch_size'], 6])
final_output = [final_output_12, final_output_13, final_output_23]
target_total = [target12, target13, target23]
losses = self.get_losses(final_output, target_total, is_softmax='l2_loss' not in cfg)
# use weight regularization
if 'omit_weight_reg' in cfg and cfg['omit_weight_reg']:
add_reg = False
else:
add_reg = True
# get losses
regularization_loss = tf.add_n( slim.losses.get_regularization_losses(), name='losses/regularization_loss' )
total_loss = slim.losses.get_total_loss( add_regularization_losses=add_reg,
name='losses/total_loss')
self.input_images = input_imgs
self.targets = targets
self.encoder_output = encoder_output
self.losses = losses
self.total_loss = total_loss
self.decoder_output = final_output
# add summaries
if self.extended_summaries:
slim.summarize_variables()
slim.summarize_weights()
slim.summarize_biases()
slim.summarize_activations()
slim.summarize_collection(tf.GraphKeys.LOSSES)
tf.summary.scalar('accuracy', self.accuracy)
slim.summarize_tensor( regularization_loss )
slim.summarize_tensor( total_loss )
self.model_built = True
def get_losses(self, final_output, target, is_softmax=True):
'''Returns the loss for a Siamese Network.
Args:
final_output: tensor that represent the final output of the image bundle.
target: Tensor of target to be output by the siamese network.
Returns:
losses: list of tensors representing each loss component
'''
print('setting up losses...')
self.target = target
self.final_output = final_output
with tf.variable_scope('losses'):
if is_softmax:
correct_prediction = tf.equal(tf.argmax(final_output,1), target)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
siamese_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=final_output,
labels=target,
name='softmax_loss'))
self.siamese_loss = siamese_loss
else:
# If it's not softmax, it's l2 norm loss.
self.accuracy = 0
# self.l2_loss = tf.losses.mean_squared_error(
# final_output,
# target,
# scope='d1',
# loss_collection=tf.GraphKeys,
# reduction="none")
self.l2_loss = []
for i in range(len(final_output)):
target[i] = tf.to_float(target[i])
final_output[i] = tf.to_float(final_output[i])
self.l2_loss.append( tf.norm(target[i] - final_output[i], axis=1) )
#self.l2_loss_sum = tf.reduce_sum(self.l2_loss, 1)
siamese_loss = self.l2_loss
self.robust_l2_loss = []
if self.threshold is not None:
for i in range(len(siamese_loss)):
ind = tf.unstack(siamese_loss[i])
siamese_loss[i] = [ tf.cond(tf.greater(x, self.threshold),
lambda: self.threshold + self.threshold * tf.log(x / self.threshold),
lambda: x) for x in ind ]
self.robust_l2_loss.append(siamese_loss[i])
siamese_loss[i] = tf.stack(siamese_loss[i])
self.siamese_losses = []
for i in range(len(siamese_loss)):
self.siamese_losses.append( tf.reduce_sum(siamese_loss[i]) / self.cfg['batch_size'] )
self.siamese_losses[1] = self.siamese_losses[1] * self.cycle_rate
self.cycle_loss_total = tf.add_n(self.siamese_losses)
tf.add_to_collection(tf.GraphKeys.LOSSES, self.cycle_loss_total)
losses = [self.cycle_loss_total]
return losses
def get_train_step_fn( self ):
'''
Returns:
A train_step function which takes args:
(sess, train_ops, global_stepf)
'''
return partial( train_steps.discriminative_train_step_fn,
return_accuracy=self.cfg['return_accuracy'] )
def build_train_op( self, global_step ):
'''
Builds train ops for discriminative task
Args:
global_step: A Tensor to be incremented
Returns:
[ loss_op, accuracy ]
'''
if not self.model_built or self.total_loss is None :
raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format(
self.model_built, self.losses_built ) )
self.global_step = global_step
t_vars = tf.trainable_variables()
# Create the optimizer train_op for the generator
self.optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg )
if 'clip_norm' in self.cfg:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True, clip_gradient_norm=self.cfg['clip_norm'])
else:
if self.is_training:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True )
else:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, is_training=False, update_global_step=True )
# Create a train_op for the discriminator
self.train_op = [ self.loss_op, self.accuracy ]
self.train_op_built = True
return self.train_op
|
880e7be4d364a7c7eaf3c63944dc6a65aa91494c
|
fda6a1be714d8e27a5d8dd3df795df45538f2fe7
|
/graphene/types/unmountedtype.py
|
83a6afefc2334234086b5d638283ee032b17de4b
|
[
"MIT"
] |
permissive
|
graphql-python/graphene
|
6badaaa97c8ad78552a656f9da9ed577cfc37add
|
93cb33d359bf2109d1b81eaeaf052cdb06f93f49
|
refs/heads/master
| 2023-08-05T02:48:36.967050
| 2023-07-26T07:43:40
| 2023-07-26T07:43:40
| 43,056,951
| 8,187
| 1,088
|
MIT
| 2023-09-01T19:59:19
| 2015-09-24T09:18:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
unmountedtype.py
|
from ..utils.orderedtype import OrderedType
class UnmountedType(OrderedType):
"""
This class acts as a proxy for a Graphene Type, so it can be mounted
dynamically as Field, InputField or Argument.
Instead of writing:
.. code:: python
from graphene import ObjectType, Field, String
class MyObjectType(ObjectType):
my_field = Field(String, description='Description here')
It lets you write:
.. code:: python
from graphene import ObjectType, String
class MyObjectType(ObjectType):
my_field = String(description='Description here')
It is not used directly, but is inherited by other types and streamlines their use in
different contexts:
- Object Type
- Scalar Type
- Enum
- Interface
- Union
An unmounted type will accept arguments based upon its context (ObjectType, Field or
InputObjectType) and pass it on to the appropriate MountedType (Field, Argument or InputField).
See each Mounted type reference for more information about valid parameters.
"""
def __init__(self, *args, **kwargs):
super(UnmountedType, self).__init__()
self.args = args
self.kwargs = kwargs
def get_type(self):
"""
This function is called when the UnmountedType instance
is mounted (as a Field, InputField or Argument)
"""
raise NotImplementedError(f"get_type not implemented in {self}")
def mount_as(self, _as):
return _as.mounted(self)
def Field(self): # noqa: N802
"""
Mount the UnmountedType as Field
"""
from .field import Field
return self.mount_as(Field)
def InputField(self): # noqa: N802
"""
Mount the UnmountedType as InputField
"""
from .inputfield import InputField
return self.mount_as(InputField)
def Argument(self): # noqa: N802
"""
Mount the UnmountedType as Argument
"""
from .argument import Argument
return self.mount_as(Argument)
def __eq__(self, other):
return self is other or (
isinstance(other, UnmountedType)
and self.get_type() == other.get_type()
and self.args == other.args
and self.kwargs == other.kwargs
)
|
4c916473bae1127a5627d43516dcf26c09cee7cf
|
33f805792e79a9ef1d577699b983031521d5b6c9
|
/tapiriik/payments/external/motivato.py
|
705fae73c4cc4dadcb3b282c047bd5a5c901a6a7
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
cpfair/tapiriik
|
0dce9599400579d33acbbdaba16806256270d0a3
|
c67e9848e67f515e116bb19cd4dd479e8414de4d
|
refs/heads/master
| 2023-08-28T10:17:11.070324
| 2023-07-25T00:59:33
| 2023-07-25T00:59:33
| 7,812,229
| 1,519
| 343
|
Apache-2.0
| 2022-10-24T16:52:34
| 2013-01-25T02:43:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
motivato.py
|
from .provider_base import ExternalPaymentProvider
from tapiriik.database import db
from tapiriik.settings import MOTIVATO_PREMIUM_USERS_LIST_URL
import requests
class MotivatoExternalPaymentProvider(ExternalPaymentProvider):
ID = "motivato"
def RefreshPaymentStateForExternalIDs(self, external_ids):
from tapiriik.services import Service, ServiceRecord
external_ids = [str(x) for x in external_ids]
connections = [ServiceRecord(x) for x in db.connections.find({"Service": "motivato", "ExternalID": {"$in": external_ids}})]
users = db.users.find({"ConnectedServices.ID": {"$in": [x._id for x in connections]}})
for user in users:
my_connection = [x for x in connections if x._id in [y["ID"] for y in user["ConnectedServices"]]][0]
# Defer to the actual service module, where all the session stuff is set up
state = Service.FromID("motivato")._getPaymentState(my_connection)
self.ApplyPaymentState(user, state, my_connection.ExternalID, duration=None)
def RefreshPaymentState(self):
from tapiriik.services import ServiceRecord
from tapiriik.payments import Payments
from tapiriik.auth import User
external_ids = requests.get(MOTIVATO_PREMIUM_USERS_LIST_URL).json()
connections = [ServiceRecord(x) for x in db.connections.find({"Service": "motivato", "ExternalID": {"$in": external_ids}})]
users = list(db.users.find({"ConnectedServices.ID": {"$in": [x._id for x in connections]}}))
payments = []
# Pull relevant payment objects and associate with users
for user in users:
my_connection = [x for x in connections if x._id in [y["ID"] for y in user["ConnectedServices"]]][0]
pmt = Payments.EnsureExternalPayment(self.ID, my_connection.ExternalID, duration=None)
payments.append(pmt)
User.AssociateExternalPayment(user, pmt, skip_deassoc=True)
# Bulk-remove these payments from users who don't own them (more or less - it'll leave anyone who switched remote accounts)
db.users.update({"_id": {"$nin": [x["_id"] for x in users]}}, {"$pull": {"ExternalPayments": {"_id": {"$in": [x["_id"] for x in payments]}}}}, multi=True)
# We don't bother unsetting users who are no longer on the list - they'll be refreshed at their next sync
ExternalPaymentProvider.Register(MotivatoExternalPaymentProvider())
|
c0811d9c77a11bedb5d0d93b4b1f4afc87bf4014
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/Nmap/Integrations/Nmap/Nmap.py
|
db0f4b383ff99a58ec7a7c165550e12db3760177
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,589
|
py
|
Nmap.py
|
import demistomock as demisto
from CommonServerPython import *
from libnmap.process import NmapProcess
from libnmap.parser import NmapParser
from libnmap.reportjson import ReportEncoder
if demisto.command() == 'test-module':
demisto.results('ok')
sys.exit(0)
if demisto.command() == 'nmap-scan':
nm = NmapProcess(argToList(demisto.args()['targets']), options=demisto.args()['options'])
rc = nm.run()
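# Added note: nm.run() returns the scan's exit code (0 on success); a non-zero
# code is reported back as an error entry below.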
if rc != 0:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Unable to execute - ' + nm.stderr
})
sys.exit(0)
r = NmapParser.parse(nm.stdout)
md = '## ' + r.summary + '\n'
hosts = []
try:
scan_type = r.scan_type
except KeyError:
scan_type = None
for host in r.hosts:
h = {}
if len(host.hostnames):
tmp_host = host.hostnames.pop()
h['Hostname'] = tmp_host
else:
tmp_host = host.address
h['Address'] = host.address
h['Status'] = host.status
svc = []
md += "### Nmap scan report for {0}".format(tmp_host) + \
(" ({0})\n".format(host.address) if tmp_host != host.address else "\n")
md += "#### Host is {0}.\n".format(host.status)
for serv in host.services:
svc.append({
'Port': serv.port,
'Protocol': serv.protocol,
'State': serv.state,
'Service': serv.service,
'Banner': serv.banner
})
extras = []
for hostscript in host._extras.get('hostscript', []):
extras.append({
'ID': hostscript.get('id'),
'Output': hostscript.get('output'),
'Elements': hostscript.get('elements'),
})
md += tableToMarkdown('Services', svc, ['Port', 'Protocol', 'State', 'Service', 'Banner'])
md += tableToMarkdown('Script Results', extras, ['ID', 'Output', 'Elements'])
h['Services'] = svc
h['ScriptResults'] = extras
hosts.append(h)
scan = {
'Summary': r.summary,
'Version': r.version,
'Started': r.started,
'Ended': r.endtime,
'CommandLine': r.commandline,
'ScanType': scan_type,
'Hosts': hosts}
demisto.results({
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': json.dumps(r, cls=ReportEncoder),
'HumanReadable': md,
'EntryContext': {'NMAP.Scan': scan}
})
|
980a1793440680b5c9d44518793a5aab65d5ead2
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/external/fsl_imglob.py
|
3f131c33b372dbc4599b846164c4d065f0360f14
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,977
|
py
|
fsl_imglob.py
|
#!/usr/bin/env python
# imglob - expand list of image filenames
# Stephen Smith, Mark Jenkinson & Matthew Webster FMRIB Image Analysis Group
# Copyright (C) 2009 University of Oxford
# Part of FSL - FMRIB's Software Library
# http://www.fmrib.ox.ac.uk/fsl
# fsl@fmrib.ox.ac.uk
#
# Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
# Imaging of the Brain), Department of Clinical Neurology, Oxford
# University, Oxford, UK
#
#
# LICENCE
#
# FMRIB Software Library, Release 5.0 (c) 2012, The University of
# Oxford (the "Software")
#
# The Software remains the property of the University of Oxford ("the
# University").
#
# The Software is distributed "AS IS" under this Licence solely for
# non-commercial use in the hope that it will be useful, but in order
# that the University as a charitable foundation protects its assets for
# the benefit of its educational and research purposes, the University
# makes clear that no condition is made or to be implied, nor is any
# warranty given or to be implied, as to the accuracy of the Software,
# or that it will be suitable for any particular purpose or for use
# under any specific conditions. Furthermore, the University disclaims
# all responsibility for the use which is made of the Software. It
# further disclaims any liability for the outcomes arising from using
# the Software.
#
# The Licensee agrees to indemnify the University and hold the
# University harmless from and against any and all claims, damages and
# liabilities asserted by third parties (including claims for
# negligence) which arise directly or indirectly from the use of the
# Software or the sale of any products based on the Software.
#
# No part of the Software may be reproduced, modified, transmitted or
# transferred in any form or by any means, electronic or mechanical,
# without the express permission of the University. The permission of
# the University is not required if the said reproduction, modification,
# transmission or transference is done without financial return, the
# conditions of this Licence are imposed upon the receiver of the
# product, and all original and amended source code is included in any
# transmitted product. You may be held legally responsible for any
# copyright infringement that is caused or encouraged by your failure to
# abide by these terms and conditions.
#
# You are not permitted under this Licence to use this Software
# commercially. Use for which any financial return is received shall be
# defined as commercial use, and includes (1) integration of all or part
# of the source code or the Software into a product for sale or license
# by or on behalf of Licensee to third parties or (2) use of the
# Software or any derivative of it for research with the final aim of
# developing software products for sale or license to a third party or
# (3) use of the Software or any derivative of it for research with the
# final aim of developing non-software products for sale or license to a
# third party, or (4) use of the Software to provide any service to an
# external organisation for which payment is received. If you are
# interested in using the Software commercially, please contact Isis
# Innovation Limited ("Isis"), the technology transfer company of the
# University, to negotiate a licence. Contact details are:
# innovation@isis.ox.ac.uk quoting reference DE/9564.
import sys
import glob
def usage():
print("Usage: $0 [-extension/extensions] <list of names>")
print(" -extension for one image with full extension")
print(" -extensions for image list with full extensions")
sys.exit(1)
# Returns whether an input filename has an image extension ( and the
# basename and extension pair )
def isImage(input, allExtensions):
for extension in allExtensions:
if input[-len(extension) :] == extension:
return True, input[: -len(extension)], extension
return False, input, ""
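# Illustrative example (added): isImage("vol.nii.gz", allExtensions) returns
# (True, "vol", ".nii.gz"); non-image inputs return (False, input, "").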
def removeImageExtension(input, allExtensions):
return isImage(input, allExtensions)[1]
def main():
if len(sys.argv) <= 1:
usage()
if sys.version_info < (2, 4):
import sets
from sets import Set
setAvailable = False
else:
setAvailable = True
deleteExtensions = True
primaryExtensions = [".nii.gz", ".nii", ".hdr.gz", ".hdr"]
secondaryExtensions = [".img.gz", ".img"]
allExtensions = primaryExtensions + secondaryExtensions
validExtensions = primaryExtensions
startingArg = 1
if sys.argv[1] == "-extensions":
validExtensions = allExtensions
deleteExtensions = False
startingArg = 2
if sys.argv[1] == "-extension":
deleteExtensions = False
startingArg = 2
filelist = []
for arg in range(startingArg, len(sys.argv)):
# #These if enables a "pedantic" style mode currently not used
# if isImage(sys.argv[arg],allExtensions)[0]:
# filelist.extend(glob.glob(sys.argv[arg]))
# else:
# for currentExtension in validExtensions:
# filelist.extend(glob.glob(sys.argv[arg]+currentExtension))
for currentExtension in validExtensions:
filelist.extend(
glob.glob(
removeImageExtension(sys.argv[arg], allExtensions)
+ currentExtension
)
)
if deleteExtensions:
for file in range(0, len(filelist)):
filelist[file] = removeImageExtension(filelist[file], allExtensions)
if setAvailable:
filelist = list(set(filelist))
else:
filelist = list(Set(filelist))
filelist.sort()
for file in range(0, len(filelist)):
print(filelist[file], end=" ")
if file < len(filelist) - 1:
print(" ", end=" ")
if __name__ == "__main__":
main()
|
8fb517892b69a5dcc4627fd9ee7d5fee2897d693
|
59f64b5cf799e31c97b11828dba4787afb8f3f17
|
/hail/python/test/hailtop/inter_cloud/test_diff.py
|
25676e903753ed914f7839893c75aaee11eeaea8
|
[
"MIT"
] |
permissive
|
hail-is/hail
|
2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1
|
07a483ae0f46c66f3ed6fd265b48f48c06298f98
|
refs/heads/main
| 2023-09-01T15:03:01.450365
| 2023-09-01T02:46:35
| 2023-09-01T02:46:35
| 45,069,467
| 913
| 262
|
MIT
| 2023-09-14T21:53:32
| 2015-10-27T20:55:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,723
|
py
|
test_diff.py
|
from typing import Tuple, AsyncIterator, Dict
import secrets
import os
import asyncio
import pytest
import functools
from hailtop.aiotools.fs import AsyncFS
from hailtop.frozendict import frozendict
from hailtop.aiotools.diff import diff, DiffException
from hailtop.utils import bounded_gather2
from hailtop.aiotools.router_fs import RouterAsyncFS
@pytest.fixture(scope='module')
def event_loop():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
yield loop
loop.close()
@pytest.fixture(scope='module')
async def router_filesystem() -> AsyncIterator[Tuple[asyncio.Semaphore, AsyncFS, Dict[str, str]]]:
token = secrets.token_hex(16)
async with RouterAsyncFS() as fs:
file_base = f'/tmp/{token}/'
await fs.mkdir(file_base)
gs_bucket = os.environ['HAIL_TEST_GCS_BUCKET']
gs_base = f'gs://{gs_bucket}/tmp/{token}/'
s3_bucket = os.environ['HAIL_TEST_S3_BUCKET']
s3_base = f's3://{s3_bucket}/tmp/{token}/'
azure_account = os.environ['HAIL_TEST_AZURE_ACCOUNT']
azure_container = os.environ['HAIL_TEST_AZURE_CONTAINER']
azure_base = f'https://{azure_account}.blob.core.windows.net/{azure_container}/tmp/{token}/'
bases = {
'file': file_base,
'gs': gs_base,
's3': s3_base,
'azure-https': azure_base
}
sema = asyncio.Semaphore(50)
async with sema:
yield (sema, fs, bases)
await bounded_gather2(sema,
functools.partial(fs.rmtree, sema, file_base),
functools.partial(fs.rmtree, sema, gs_base),
functools.partial(fs.rmtree, sema, s3_base),
functools.partial(fs.rmtree, sema, azure_base)
)
assert not await fs.isdir(file_base)
assert not await fs.isdir(gs_base)
assert not await fs.isdir(s3_base)
assert not await fs.isdir(azure_base)
async def fresh_dir(fs, bases, scheme):
token = secrets.token_hex(16)
dir = f'{bases[scheme]}{token}/'
await fs.mkdir(dir)
return dir
@pytest.fixture(params=['file/file', 'file/gs', 'file/s3', 'file/azure-https',
'gs/file', 'gs/gs', 'gs/s3', 'gs/azure-https',
's3/file', 's3/gs', 's3/s3', 's3/azure-https',
'azure-https/file', 'azure-https/gs', 'azure-https/s3', 'azure-https/azure-https'])
async def diff_test_context(request, router_filesystem: Tuple[asyncio.Semaphore, AsyncFS, Dict[str, str]]):
sema, fs, bases = router_filesystem
[src_scheme, dest_scheme] = request.param.split('/')
src_base = await fresh_dir(fs, bases, src_scheme)
dest_base = await fresh_dir(fs, bases, dest_scheme)
await asyncio.gather(*[
fs.mkdir(x) for x in [f'{src_base}a/', f'{src_base}b/', f'{dest_base}a/', f'{dest_base}b/']])
await asyncio.gather(
fs.write(f'{src_base}same', b'123'),
fs.write(f'{dest_base}same', b'123'),
fs.write(f'{src_base}diff', b'123'),
fs.write(f'{dest_base}diff', b'1'),
fs.write(f'{src_base}src-only', b'123'),
fs.write(f'{dest_base}dest-only', b'123'),
fs.write(f'{src_base}a/same', b'123'),
fs.write(f'{dest_base}a/same', b'123'),
fs.write(f'{src_base}a/diff', b'123'),
fs.write(f'{dest_base}a/diff', b'1'),
fs.write(f'{src_base}a/src-only', b'123'),
fs.write(f'{dest_base}a/dest-only', b'123'),
fs.write(f'{src_base}b/same', b'123'),
fs.write(f'{dest_base}b/same', b'123'),
fs.write(f'{src_base}b/diff', b'123'),
fs.write(f'{dest_base}b/diff', b'1'),
fs.write(f'{src_base}b/src-only', b'123'),
fs.write(f'{dest_base}b/dest-only', b'123'),
)
yield sema, fs, src_base, dest_base
@pytest.mark.asyncio
async def test_diff(diff_test_context):
sema, fs, src_base, dest_base = diff_test_context
expected = {
frozendict({'from': f'{src_base}diff', 'to': f'{dest_base}diff', 'from_size': 3, 'to_size': 1}),
frozendict({'from': f'{src_base}src-only', 'to': f'{dest_base}src-only', 'from_size': 3, 'to_size': None}),
frozendict({'from': f'{src_base}a/diff', 'to': f'{dest_base}a/diff', 'from_size': 3, 'to_size': 1}),
frozendict({'from': f'{src_base}a/src-only', 'to': f'{dest_base}a/src-only', 'from_size': 3, 'to_size': None}),
frozendict({'from': f'{src_base}b/diff', 'to': f'{dest_base}b/diff', 'from_size': 3, 'to_size': 1}),
frozendict({'from': f'{src_base}b/src-only', 'to': f'{dest_base}b/src-only', 'from_size': 3, 'to_size': None}),
}
actual = await diff(source=src_base, target=dest_base)
actual_set = set(frozendict(x) for x in actual)
assert actual_set == expected, str((actual, expected))
try:
result = await diff(source=f'{src_base}doesnotexist', target=dest_base)
except DiffException as exc:
assert 'Source URL refers to no files or directories' in exc.args[0]
else:
assert False, result
expected = [
{'from': f'{src_base}src-only', 'to': f'{dest_base}', 'from_size': 3, 'to_size': None}
]
actual = await diff(source=f'{src_base}src-only', target=f'{dest_base}')
assert actual == expected
expected = [
{'from': f'{src_base}diff', 'to': f'{dest_base}diff', 'from_size': 3, 'to_size': 1}
]
actual = await diff(source=f'{src_base}diff', target=f'{dest_base}diff')
assert actual == expected
expected = []
actual = await diff(source=f'{src_base}same', target=f'{dest_base}same')
assert actual == expected
|
419a64118f9850a848c01caac3a848c009dc3dd5
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/GeneratorInterface/GenFilters/python/ZgammaFilter_cfi.py
|
d76b4d2cd720d8b0cf35c38230295e959f01a7c9
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 777
|
py
|
ZgammaFilter_cfi.py
|
import FWCore.ParameterSet.Config as cms
# values tuned also according to slide 3 of :
# https://indico.cern.ch/getFile.py/access?contribId=23&sessionId=2&resId=0&materialId=slides&confId=271548
# selection efficiency of approx 6% for ZMM_8TeV
myZgammaFilter = cms.EDFilter('ZgammaMassFilter',
HepMCProduct = cms.InputTag("generator","unmeared"),
minPhotonPt = cms.double(7.),
minLeptonPt = cms.double(7.),
minPhotonEta = cms.double(-3),
minLeptonEta = cms.double(-3),
maxPhotonEta = cms.double(3),
maxLeptonEta = cms.double(3),
minDileptonMass = cms.double(30.),
minZgMass = cms.double(40.)
)
ZgammaFilter = cms.Sequence( myZgammaFilter )
|
96d984cf17682942c330df9716bd27bace365712
|
fbbffcc0c0d689b0bca759c37399374c3772f922
|
/python_hook.py
|
f04a59a4b27a64b0ca03a8a197dd5acbe821c36c
|
[] |
no_license
|
holdyeah/wechat-pc-hook-python
|
147872ce9818541e367319ea63eba40ddbb90ad5
|
763a882de5c8aa4f9bb71855a74e02ee66a9e563
|
refs/heads/master
| 2023-03-17T06:17:30.988003
| 2023-03-04T08:44:37
| 2023-03-04T08:44:37
| 204,273,242
| 284
| 86
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
python_hook.py
|
#-*- coding: utf-8 -*-
import win32api,win32process,win32con,ctypes,psutil
from ctypes import *
PAGE_READWRITE = 0x00000040
PROCESS_ALL_ACCESS = (0x000F0000|0x00100000|0xFFF)
VIRTUAL_MEM = (0x00001000 | 0x00002000)
dll_path = b"C:\\SendMessage.dll"
print(dll_path)
dll_len = len(dll_path)
kernel32 = ctypes.windll.kernel32
# Step 1: take a snapshot of all processes on the system. Step 2: compare process names in the snapshot.
for proc in psutil.process_iter():
try:
if proc.name() == 'WeChat.exe':
print(proc)
break
else:
proc = 0
except (psutil.AccessDenied, psutil.NoSuchProcess):
        print("No permission or no such process")
# Step 3: open the process with the found pid to obtain a handle
if proc == 0:
    print("WeChat not found or WeChat is not running")
else:
h_process=kernel32.OpenProcess(PROCESS_ALL_ACCESS,False,(proc.pid))
print('%x'%h_process)
if not h_process:
        print('Failed to open the process: insufficient permissions')
else:
arg_adress=kernel32.VirtualAllocEx(h_process,None,dll_len*10,VIRTUAL_MEM,PAGE_READWRITE)
print('%x'%arg_adress)
NULL = c_int(0)
        # Write only the DLL path plus its terminating NUL byte; the original dll_len*10
        # size would read far past the end of the Python bytes buffer.
        whhh=kernel32.WriteProcessMemory(h_process,arg_adress,dll_path,dll_len+1,NULL)
h_kernel32 = win32api.GetModuleHandle("kernel32.dll")
h_loadlib = win32api.GetProcAddress(h_kernel32, 'LoadLibraryA')
print('%x'%h_kernel32,'%x'%h_loadlib)
thread_id = c_ulong(0)
c_remt = kernel32.CreateRemoteThread(h_process,None,0,h_loadlib,arg_adress,0,byref(thread_id))
print('%x'%c_remt)
if not c_remt:
print("[!] Failed to inject DLL, exit...")
|
b3194712025cf30799b20175a9c5c1eb0e415e7b
|
8d77f3b72dc52b85ee0c4ef6ba06f63a6920841f
|
/python/aitemplate/compiler/ops/gemm_universal/perm021fc_crc_bias.py
|
7443bfa84a9d83ec9f1d2de73314da501a5acc30
|
[
"Apache-2.0"
] |
permissive
|
facebookincubator/AITemplate
|
b643c217e1d15f7f17dab1eb1cc6855eab664b97
|
c60dc19788217556ba12ea378c02b9fd0aea9ffe
|
refs/heads/main
| 2023-08-28T18:22:15.828008
| 2023-08-28T14:43:41
| 2023-08-28T14:43:41
| 514,321,895
| 4,065
| 334
|
Apache-2.0
| 2023-09-14T04:53:57
| 2022-07-15T15:40:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,920
|
py
|
perm021fc_crc_bias.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
GEMM Specialization: (A.permute(0, 2, 1)[col] @ B[row] + Bias)
"""
from aitemplate.compiler.base import IntImm, Tensor
from aitemplate.compiler.ops.gemm_universal import perm021fc_crc
from aitemplate.compiler.tensor_accessor import TensorAccessor
# pylint: disable=C0103, W0223, W0221
class perm021fc_crc_bias(perm021fc_crc):
"""GEMM Specialization: (A.permute(0, 2, 1) @ B + Bias)
This one is used when n/m gives you better alignment than m/k.
This op is equivalent to the following PyTorch code:
.. highlight:: python
.. code-block:: python
X_pt = torch.randn(B, K, M).cuda().half()
W_pt = torch.randn(K, N).cuda().half()
B_pt = torch.randn(N).cuda().half()
XT = X_pt.permute(0, 2, 1)
XT = torch.reshape(XT, (-1, K))
WT = W_pt.transpose(0, 1).contiguous()
Y_pt = torch.nn.functional.linear(XT, WT, bias=B_pt)
Y_pt = torch.reshape(Y_pt, (B, M, N)).contiguous()
"""
    def __init__(self):
        """Constructor for perm021fc_crc_bias"""
super().__init__()
self._attrs["op"] = "perm021fc_crc_bias"
def _infer_shapes(self, a: Tensor, b: Tensor, bias: Tensor):
bias_shape = bias._attrs["shape"]
if len(bias_shape) != 1:
raise RuntimeError("Bias should be 1D vector ")
bias_dim = bias_shape[0]
if not isinstance(bias_dim, IntImm):
raise RuntimeError("Bias should be fixed 1D vector")
outshape = super()._infer_shapes(a, b)
if outshape[2] != bias_dim:
raise RuntimeError("GEMM/Bias shape doesn't match")
return outshape
def __call__(self, a: Tensor, b: Tensor, bias: Tensor) -> Tensor:
self._attrs["inputs"] = [a, b, bias]
self._attrs["input_accessors"] = [
TensorAccessor(tensor) for tensor in self._attrs["inputs"]
]
self._set_depth()
self._sanity_check(a, b)
output_shape = self._infer_shapes(a, b, bias)
self._extract_epilogue_alignment(output_shape)
output = Tensor(output_shape, src_ops={self}, dtype=a._attrs["dtype"])
self._attrs["outputs"] = [output]
self._attrs["output_accessors"] = [
TensorAccessor(tensor) for tensor in self._attrs["outputs"]
]
return output
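# --- Editor's sketch (not part of the original file): minimal usage of the op above.
# The tensors `a`, `b` and `bias` are assumed to be already-built AITemplate Tensors of
# shapes (B, K, M), (K, N) and (N,), matching the PyTorch reference in the class docstring.
def _example_perm021fc_crc_bias_usage(a: Tensor, b: Tensor, bias: Tensor) -> Tensor:
    """Build the op and apply it; returns a Tensor of shape (B, M, N)."""
    op = perm021fc_crc_bias()
    return op(a, b, bias)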
|
bab0dabc793c8f677a6781f233d7b796e255a46f
|
8d6f97d71518ea4e32bbb4c332f55aac1fbfefb9
|
/pyvex/lifting/util/instr_helper.py
|
ba79df8184615f1fdce6e09378a29b5c165b4a46
|
[
"BSD-2-Clause"
] |
permissive
|
angr/pyvex
|
acb80c39d42622e930a1fa0cb774a95f1452467a
|
40f151e54e75b5ad57856675b4c4c70247d0cd9c
|
refs/heads/master
| 2023-08-28T18:41:00.203041
| 2023-08-25T19:40:56
| 2023-08-25T19:40:56
| 13,027,352
| 304
| 130
|
BSD-2-Clause
| 2023-09-11T22:08:43
| 2013-09-23T05:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 17,751
|
py
|
instr_helper.py
|
import abc
import string
import bitstring
from pyvex.expr import IRExpr, RdTmp
from .lifter_helper import ParseError
from .syntax_wrapper import VexValue
from .vex_helper import IRSBCustomizer, JumpKind, vex_int_class
class Instruction(metaclass=abc.ABCMeta):
"""
Base class for an Instruction.
You should make a subclass of this for each instruction you want to lift. These classes will contain the "semantics"
of the instruction, that is, what it _does_, in terms of the VEX IR.
You may want to subclass this for your architecture, and add arch-specific handling for parsing, argument
resolution, etc., and have instructions subclass that instead.
The core parsing functionality is done via ``bin_format``. Each instruction should be a subclass of ``Instruction``
and will be parsed by comparing bits in the provided bitstream to symbols in the ``bin_format`` member of the class.
"Bin formats" are strings of symbols, like those you'd find in an ISA document, such as "0010rrrrddddffmm"
0 or 1 specify hard-coded bits that must match for an instruction to match.
Any letters specify arguments, grouped by letter, which will be parsed and provided as bitstrings in the ``data``
member of the class as a dictionary.
So, in our example, the bits ``0010110101101001``, applied to format string ``0010rrrrddddffmm``
will result in the following in ``self.data``:
{'r': '1101',
'd': '0110',
'f': '10',
'm': '01'}
Implement compute_result to provide the "meat" of what your instruction does.
You can also implement it in your arch-specific subclass of ``Instruction``, to handle things common to all
instructions, and provide instruction implementations elsewhere.
We provide the ``VexValue`` syntax wrapper to make expressing instruction semantics easy.
You first convert the bitstring arguments into ``VexValue``s using the provided convenience methods
(``self.get/put/load/store/etc.``)
This loads the register from the actual registers into a temporary value we can work with.
You can then write it back to a register when you're done.
For example, if you have the register in ``r``, as above, you can make a ``VexValue`` like this:
r = int(self.data['r'], 2) # we get bits corresponding to `r` bits and convert it to an int
r_vv = self.get(r, Type.int_32)
If you then had an instruction to increment ``r``, you could simply:
        r_vv += 1
You could then write it back to the register like this:
self.put(r_vv, r)
Note that most architectures have special flags that get set differently for each instruction, make sure to
implement those as well (override ``set_flags()`` )
Override ``parse()`` to extend parsing.
For example, in MSP430, this allows us to grab extra words from the bitstream
when extra immediate words are present.
All architectures are different enough that there's no magic recipe for how to write a lifter.
See the examples provided by gymrat for ideas of how to use this to build your own lifters quickly and easily.
"""
data = None
irsb_c = None
def __init__(self, bitstrm, arch, addr):
"""
Create an instance of the instruction
:param irsb_c: The IRSBCustomizer to put VEX instructions into
:param bitstrm: The bitstream to decode instructions from
:param addr: The address of the instruction to be lifted, used only for jumps and branches
"""
self.addr = addr
self.arch = arch
self.bitwidth = len(self.bin_format)
self.data = self.parse(bitstrm)
@property
@abc.abstractmethod
def bin_format(self) -> str:
"""
Read the documentation of the class to understand what a bin format string is
:return: str bin format string
"""
@property
@abc.abstractmethod
def name(self) -> str:
"""
Name of the instruction
Can be useful to name the instruction when there's an error related to it
"""
def __call__(self, irsb_c, past_instructions, future_instructions):
self.lift(irsb_c, past_instructions, future_instructions)
def mark_instruction_start(self):
self.irsb_c.imark(self.addr, self.bytewidth, 0)
def fetch_operands(self): # pylint: disable=no-self-use
"""
Get the operands out of memory or registers
Return a tuple of operands for the instruction
"""
return ()
def lift(self, irsb_c: IRSBCustomizer, past_instructions, future_instructions): # pylint: disable=unused-argument
"""
This is the main body of the "lifting" for the instruction.
This can/should be overridden to provide the general flow of how instructions in your arch work.
For example, in MSP430, this is:
- Figure out what your operands are by parsing the addressing, and load them into temporary registers
- Do the actual operation, and commit the result, if needed.
- Compute the flags
"""
self.irsb_c = irsb_c
# Always call this first!
self.mark_instruction_start()
# Then do the actual stuff.
inputs = self.fetch_operands()
retval = self.compute_result(*inputs) # pylint: disable=assignment-from-none
if retval is not None:
self.commit_result(retval)
vals = list(inputs) + [retval]
self.compute_flags(*vals)
def commit_result(self, res):
"""
This where the result of the operation is written to a destination.
This happens only if compute_result does not return None, and happens before compute_flags is called.
Override this to specify how to write out the result.
The results of fetch_operands can be used to resolve various addressing modes for the write outward.
A common pattern is to return a function from fetch_operands which will be called here to perform the write.
:param args: A tuple of the results of fetch_operands and compute_result
"""
def compute_result(self, *args): # pylint: disable=unused-argument,no-self-use
"""
This is where the actual operation performed by your instruction, excluding the calculation of flags, should be
performed. Return the VexValue of the "result" of the instruction, which may
be used to calculate the flags later.
For example, for a simple add, with arguments src and dst, you can simply write:
        return src + dst
:param args:
:return: A VexValue containing the "result" of the operation.
"""
return None
def compute_flags(self, *args):
"""
Most CPU architectures have "flags" that should be computed for many instructions.
        Override this to specify how that happens. One common pattern is to define this method to call specific methods
        to update each flag, which can then be overridden in the actual classes for each instruction.
"""
def match_instruction(self, data, bitstrm): # pylint: disable=unused-argument,no-self-use
"""
Override this to extend the parsing functionality.
This is great for if your arch has instruction "formats" that have an opcode that has to match.
:param data:
:param bitstrm:
:return: data
"""
return data
def parse(self, bitstrm):
if self.arch.instruction_endness == "Iend_LE":
# This arch stores its instructions in memory endian-flipped compared to the ISA.
# To enable natural lifter-writing, we let the user write them like in the manual, and correct for
# endness here.
instr_bits = self._load_le_instr(bitstrm, self.bitwidth)
else:
instr_bits = bitstrm.peek("bin:%d" % self.bitwidth)
data = {c: "" for c in self.bin_format if c in string.ascii_letters}
for c, b in zip(self.bin_format, instr_bits):
if c in "01":
if b != c:
raise ParseError("Mismatch between format bit %c and instruction bit %c" % (c, b))
elif c in string.ascii_letters:
data[c] += b
else:
raise ValueError("Invalid bin_format character %c" % c)
# Hook here for extra matching functionality
if hasattr(self, "match_instruction"):
# Should raise if it's not right
self.match_instruction(data, bitstrm)
# Use up the bits once we're sure it's right
self.rawbits = bitstrm.read("hex:%d" % self.bitwidth)
# Hook here for extra parsing functionality (e.g., trailers)
if hasattr(self, "_extra_parsing"):
data = self._extra_parsing(data, bitstrm) # pylint: disable=no-member
return data
@property
def bytewidth(self):
if self.bitwidth % self.arch.byte_width != 0:
raise ValueError("Instruction is not a multiple of bytes wide!")
return self.bitwidth // self.arch.byte_width
def disassemble(self):
"""
Return the disassembly of this instruction, as a string.
Override this in subclasses.
:return: The address (self.addr), the instruction's name, and a list of its operands, as strings
"""
return self.addr, "UNK", [self.rawbits]
# These methods should be called in subclasses to do register and memory operations
def load(self, addr, ty):
"""
Load a value from memory into a VEX temporary register.
:param addr: The VexValue containing the addr to load from.
:param ty: The Type of the resulting data
:return: a VexValue
"""
rdt = self.irsb_c.load(addr.rdt, ty)
return VexValue(self.irsb_c, rdt)
def constant(self, val, ty):
"""
Creates a constant as a VexValue
:param val: The value, as an integer
:param ty: The type of the resulting VexValue
:return: a VexValue
"""
if isinstance(val, VexValue) and not isinstance(val, IRExpr):
raise Exception("Constant cannot be made from VexValue or IRExpr")
rdt = self.irsb_c.mkconst(val, ty)
return VexValue(self.irsb_c, rdt)
@staticmethod
def _lookup_register(arch, reg):
if isinstance(reg, int):
if hasattr(arch, "register_index"):
reg = arch.register_index[reg]
else:
reg = arch.register_list[reg].name
return arch.get_register_offset(reg)
def get(self, reg, ty):
"""
Load a value from a machine register into a VEX temporary register.
All values must be loaded out of registers before they can be used with operations, etc
and stored back into them when the instruction is over. See Put().
:param reg: Register number as an integer, or register string name
:param ty: The Type to use.
:return: A VexValue of the gotten value.
"""
offset = self._lookup_register(self.irsb_c.irsb.arch, reg)
if offset == self.irsb_c.irsb.arch.ip_offset:
return self.constant(self.addr, ty)
rdt = self.irsb_c.rdreg(offset, ty)
return VexValue(self.irsb_c, rdt)
def put(self, val, reg):
"""
Puts a value from a VEX temporary register into a machine register.
This is how the results of operations done to registers get committed to the machine's state.
:param val: The VexValue to store (Want to store a constant? See Constant() first)
:param reg: The integer register number to store into, or register name
:return: None
"""
offset = self._lookup_register(self.irsb_c.irsb.arch, reg)
self.irsb_c.put(val.rdt, offset)
def put_conditional(self, cond, valiftrue, valiffalse, reg):
"""
Like put, except it checks a condition
to decide what to put in the destination register.
:param cond: The VexValue representing the logical expression for the condition
(if your expression only has constants, don't use this method!)
:param valiftrue: the VexValue to put in reg if cond evals as true
        :param valiffalse: the VexValue to put in reg if cond evals as false
:param reg: The integer register number to store into, or register name
:return: None
"""
val = self.irsb_c.ite(cond.rdt, valiftrue.rdt, valiffalse.rdt)
offset = self._lookup_register(self.irsb_c.irsb.arch, reg)
self.irsb_c.put(val, offset)
def store(self, val, addr):
"""
        Store a VexValue in memory at the specified location.
:param val: The VexValue of the value to store
:param addr: The VexValue of the address to store into
:return: None
"""
self.irsb_c.store(addr.rdt, val.rdt)
def jump(self, condition, to_addr, jumpkind=JumpKind.Boring, ip_offset=None):
"""
Jump to a specified destination, under the specified condition.
Used for branches, jumps, calls, returns, etc.
:param condition: The VexValue representing the expression for the guard, or None for an unconditional jump
:param to_addr: The address to jump to.
        :param jumpkind: The JumpKind to use. See the VEX docs for what these are; you only need them for things
            that aren't normal jumps (e.g., calls, interrupts, program exits, etc.)
:return: None
"""
to_addr_ty = None
if isinstance(to_addr, VexValue):
# Unpack a VV
to_addr_rdt = to_addr.rdt
to_addr_ty = to_addr.ty
elif isinstance(to_addr, int):
# Direct jump to an int, make an RdT and Ty
to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type
to_addr = self.constant(to_addr, to_addr_ty) # TODO archinfo may be changing
to_addr_rdt = to_addr.rdt
elif isinstance(to_addr, RdTmp):
# An RdT; just get the Ty of the arch's pointer type
to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type
to_addr_rdt = to_addr
else:
raise TypeError("Jump destination has unknown type: " + repr(type(to_addr)))
if not condition:
# This is the default exit.
self.irsb_c.irsb.jumpkind = jumpkind
self.irsb_c.irsb.next = to_addr_rdt
else:
# add another exit
# EDG says: We should make sure folks set ArchXYZ.ip_offset like they're supposed to
if ip_offset is None:
ip_offset = self.arch.ip_offset
assert ip_offset is not None
negated_condition_rdt = self.ite(condition, self.constant(0, condition.ty), self.constant(1, condition.ty))
direct_exit_target = self.constant(self.addr + (self.bitwidth // 8), to_addr_ty)
self.irsb_c.add_exit(negated_condition_rdt, direct_exit_target.rdt, jumpkind, ip_offset)
self.irsb_c.irsb.jumpkind = jumpkind
self.irsb_c.irsb.next = to_addr_rdt
def ite(self, cond, t, f):
return self.irsb_c.ite(cond.rdt, t.rdt, f.rdt)
def ccall(self, ret_type, func_name, args):
"""
Creates a CCall operation.
A CCall is a procedure that calculates a value at *runtime*, not at lift-time.
You can use these for flags, unresolvable jump targets, etc.
We caution you to avoid using them when at all possible though.
:param ret_type: The return type of the CCall
        :param func_name: The name of the helper function to call. If you're using angr, this should be added (or
monkeypatched) into ``angr.engines.vex.claripy.ccall``.
:param args: List of arguments to the function
:return: A VexValue of the result.
"""
# Check the args to make sure they're the right type
list_args = list(args)
new_args = []
for arg in list_args:
if isinstance(arg, VexValue):
arg = arg.rdt
new_args.append(arg)
args = tuple(new_args)
cc = self.irsb_c.op_ccall(ret_type, func_name, args)
return VexValue(self.irsb_c, cc)
def dirty(self, ret_type, func_name, args) -> VexValue:
"""
Creates a dirty call operation.
These are like ccalls (clean calls) but their implementations are theoretically allowed to read or write to or
from any part of the state, making them a nightmare for static analysis to reason about. Avoid their use at all
costs.
:param ret_type: The return type of the dirty call, or None if the dirty call doesn't return anything.
:param func_name: The name of the helper function to call. If you're using angr, this should be added (or
monkeypatched) into ``angr.engines.vex.heavy.dirty``.
:param args: List of arguments to the function
:return: A VexValue of the result.
"""
# Check the args to make sure they're the right type
list_args = list(args)
new_args = []
for arg in list_args:
if isinstance(arg, VexValue):
arg = arg.rdt
new_args.append(arg)
args = tuple(new_args)
rdt = self.irsb_c.dirty(ret_type, func_name, args)
return VexValue(self.irsb_c, rdt)
def _load_le_instr(self, bitstream: bitstring.ConstBitStream, numbits: int) -> str:
return bitstring.Bits(uint=bitstream.peek("uintle:%d" % numbits), length=numbits).bin
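# --- Editor's sketch (not part of pyvex): a minimal, hypothetical Instruction subclass
# following the recipe in the class docstring above. The 16-bit encoding, the register
# numbering and the "ADD rD, rR" semantics are invented purely for illustration.
class _ExampleAddInstruction(Instruction):
    name = "ADD"
    bin_format = "0010rrrrdddd0000"  # fixed opcode bits plus two 4-bit register fields

    def fetch_operands(self):
        # self.data holds the parsed bit groups, e.g. {'r': '1101', 'd': '0110'}.
        src = int(self.data["r"], 2)
        dst = int(self.data["d"], 2)
        ty = vex_int_class(16).type
        return self.get(src, ty), self.get(dst, ty), dst

    def compute_result(self, src_vv, dst_vv, dst):
        self._dst = dst
        return src_vv + dst_vv

    def commit_result(self, res):
        # Write the sum back to the destination register fetched above.
        self.put(res, self._dst)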
|
9081bac3b96766ec232644556b278d65463a48a6
|
29dfa1deefc72493d1b1eecf1a8df62e24599a77
|
/dfvfs/encoding/base64_decoder.py
|
395620456f3bca6646bd0bc1f846e52ccf08da61
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/dfvfs
|
fd301eaf721a9945641a44ff722aec963158a6b3
|
28756d910e951a22c5f0b2bcf5184f055a19d544
|
refs/heads/main
| 2023-08-07T22:45:45.432668
| 2023-07-30T12:17:56
| 2023-07-30T12:17:56
| 23,820,144
| 197
| 65
|
Apache-2.0
| 2023-07-30T12:17:58
| 2014-09-09T05:06:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
base64_decoder.py
|
# -*- coding: utf-8 -*-
"""The base64 decoder implementation."""
import base64
import binascii
from dfvfs.encoding import decoder
from dfvfs.encoding import manager
from dfvfs.lib import definitions
from dfvfs.lib import errors
class Base64Decoder(decoder.Decoder):
"""Base64 decoder using base64."""
ENCODING_METHOD = definitions.ENCODING_METHOD_BASE64
def Decode(self, encoded_data):
"""Decode the encoded data.
Args:
      encoded_data (bytes): encoded data.
Returns:
tuple(bytes, bytes): decoded data and remaining encoded data.
Raises:
BackEndError: if the base64 stream cannot be decoded.
"""
try:
# TODO: replace by libuna implementation or equivalent. The behavior of
# base64.b64decode() does not raise TypeError for certain invalid base64
# data e.g. b'\x01\x02\x03\x04\x05\x06\x07\x08' these are silently
# ignored.
decoded_data = base64.b64decode(encoded_data)
except (TypeError, binascii.Error) as exception:
raise errors.BackEndError(
f'Unable to decode base64 stream with error: {exception!s}.')
return decoded_data, b''
manager.EncodingManager.RegisterDecoder(Base64Decoder)
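# --- Editor's sketch (illustrative only, not part of dfvfs; assumes the base Decoder needs
# no constructor arguments): Decode() returns the decoded bytes plus any remaining encoded
# data, which for this decoder is always b''.
def _example_base64_decode():
    decoded_data, remaining = Base64Decoder().Decode(b'aGVsbG8=')
    assert decoded_data == b'hello'
    assert remaining == b''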
|
18b67ada81998bd3a173c4d1b51ae4f047208888
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/DecoMR/models/DMR.py
|
813a8e3cd66980cc2b5bbbe73f372b6b7e2c0481
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
DMR.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The model of DMR
"""
from mindspore import nn
class DMR(nn.Cell):
def __init__(self, CNet, LNet):
super(DMR, self).__init__()
self.CNet = CNet
self.LNet = LNet
def construct(self, images):
pred_dp, dp_feature, codes = self.CNet(images)
pred_uv_map, pred_camera = self.LNet(pred_dp, dp_feature, codes)
return pred_dp, pred_uv_map, pred_camera
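# --- Editor's sketch (hypothetical, not part of the original file): CNet and LNet are
# assumed to be already-constructed nn.Cell instances; DMR simply chains them, running
# correspondence prediction first and then UV/camera regression.
def _example_dmr_forward(CNet, LNet, images):
    model = DMR(CNet, LNet)
    pred_dp, pred_uv_map, pred_camera = model(images)
    return pred_dp, pred_uv_map, pred_camera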
|
9e7ab741c9f884efd925278df7b58c5d4dfc5b96
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/ddtrace/internal/telemetry/metrics.py
|
057227690b2ee1da5a79fdc073abfa93f50677c7
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,044
|
py
|
metrics.py
|
# -*- coding: utf-8 -*-
import abc
import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import six
MetricTagType = Optional[Tuple[Tuple[str, str], ...]]
class Metric(six.with_metaclass(abc.ABCMeta)):
"""
Telemetry Metrics are stored in DD dashboards, check the metrics in datadoghq.com/metric/explorer
"""
metric_type = ""
__slots__ = ["namespace", "name", "_tags", "is_common_to_all_tracers", "interval", "_points", "_count"]
def __init__(self, namespace, name, tags, common, interval=None):
# type: (str, str, MetricTagType, bool, Optional[float]) -> None
"""
namespace: the scope of the metric: tracer, appsec, etc.
name: string
tags: extra information attached to a metric
common: set to True if a metric is common to all tracers, false if it is python specific
        interval: interval in seconds; set for gauge and rate metrics, ignored for count metrics
"""
self.name = name.lower()
self.is_common_to_all_tracers = common
self.interval = interval
self.namespace = namespace
self._tags = tags
self._count = 0.0
self._points = [] # type: List
@classmethod
def get_id(cls, name, namespace, tags, metric_type):
# type: (str, str, MetricTagType, str) -> int
"""
https://www.datadoghq.com/blog/the-power-of-tagged-metrics/#whats-a-metric-tag
"""
return hash((name, namespace, tags, metric_type))
def __hash__(self):
return self.get_id(self.name, self.namespace, self._tags, self.metric_type)
@abc.abstractmethod
def add_point(self, value=1.0):
# type: (float) -> None
"""adds timestamped data point associated with a metric"""
pass
def to_dict(self):
# type: () -> Dict
"""returns a dictionary containing the metrics fields expected by the telemetry intake service"""
data = {
"metric": self.name,
"type": self.metric_type,
"common": self.is_common_to_all_tracers,
"points": self._points,
"tags": ["{}:{}".format(k, v).lower() for k, v in self._tags] if self._tags else [],
}
if self.interval is not None:
data["interval"] = int(self.interval)
return data
class CountMetric(Metric):
"""
A count type adds up all the submitted values in a time interval. This would be suitable for a
metric tracking the number of website hits, for instance.
"""
metric_type = "count"
def add_point(self, value=1.0):
# type: (float) -> None
"""adds timestamped data point associated with a metric"""
if self._points:
self._points[0][1] += value
else:
self._points = [[time.time(), value]]
class GaugeMetric(Metric):
"""
A gauge type takes the last value reported during the interval. This type would make sense for tracking RAM or
CPU usage, where taking the last value provides a representative picture of the host’s behavior during the time
interval. In this case, using a different type such as count would probably lead to inaccurate and extreme values.
Choosing the correct metric type ensures accurate data.
"""
metric_type = "gauge"
def add_point(self, value=1.0):
# type: (float) -> None
"""adds timestamped data point associated with a metric"""
self._points = [(time.time(), value)]
class RateMetric(Metric):
"""
The rate type takes the count and divides it by the length of the time interval. This is useful if you’re
interested in the number of hits per second.
"""
metric_type = "rate"
def add_point(self, value=1.0):
# type: (float) -> None
"""Example:
https://github.com/DataDog/datadogpy/blob/ee5ac16744407dcbd7a3640ee7b4456536460065/datadog/threadstats/metrics.py#L181
"""
self._count += value
rate = (self._count / self.interval) if self.interval else 0.0
self._points = [(time.time(), rate)]
class DistributionMetric(Metric):
"""
    The distribution type keeps every value submitted during the interval (no client-side aggregation),
    so the intake service can compute summary statistics such as percentiles over the raw points.
"""
metric_type = "distributions"
def add_point(self, value=1.0):
# type: (float) -> None
"""Example:
https://github.com/DataDog/datadogpy/blob/ee5ac16744407dcbd7a3640ee7b4456536460065/datadog/threadstats/metrics.py#L181
"""
self._points.append(value)
def to_dict(self):
# type: () -> Dict
"""returns a dictionary containing the metrics fields expected by the telemetry intake service"""
data = {
"metric": self.name,
"points": self._points,
"tags": ["{}:{}".format(k, v).lower() for k, v in self._tags] if self._tags else [],
}
return data
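# --- Editor's sketch (not part of the original module): how the metric types above differ.
# Namespace, metric names and interval values below are made up for illustration.
def _example_metric_behaviour():
    count = CountMetric("tracer", "spans_created", None, common=True)
    count.add_point(2.0)
    count.add_point(3.0)      # one point whose value accumulates to 5.0
    gauge = GaugeMetric("tracer", "queue_length", None, common=True, interval=10)
    gauge.add_point(7.0)
    gauge.add_point(4.0)      # only the last value (4.0) is kept
    rate = RateMetric("tracer", "requests", None, common=True, interval=10)
    rate.add_point(20.0)      # stored as 20.0 / 10 = 2.0 per second
    return count.to_dict(), gauge.to_dict(), rate.to_dict()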
|
b7132fa30d67574e87fff1582aabe6f0b92b549e
|
cdbeb80f60cbaed50a8280a177be6f732c0ba279
|
/gen_index/syn_validation_run.py
|
6b53522b3953f062a9dc4666592fcfb2918381a0
|
[
"Apache-2.0"
] |
permissive
|
UniversalDependencies/docs
|
e1c2a4a516c18f11dbfa7bf7ce024c9a5d26023b
|
758f9189af904561aa91171e99101fa81b82d98a
|
refs/heads/pages-source
| 2023-08-05T00:18:51.264412
| 2023-08-03T09:21:50
| 2023-08-03T09:21:50
| 18,795,673
| 236
| 252
|
Apache-2.0
| 2023-08-06T12:18:55
| 2014-04-15T10:09:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 10,160
|
py
|
syn_validation_run.py
|
import os.path
import glob
import traceback
import yaml
import sys
import codecs
import subprocess
import cStringIO
import re
import urllib
import itertools
import json
import datetime
THISDIR=os.path.dirname(os.path.abspath(__file__))
def read_conll(inp,maxsent):
""" Read conll format file and yield one sentence at a time as a list of lists of columns. If inp is a string it will be interpreted as filename, otherwise as open file for reading in unicode"""
if isinstance(inp,basestring):
f=codecs.open(inp,u"rt",u"utf-8")
else:
f=codecs.getreader("utf-8")(inp) # read stdin
count=0
sent=[]
comments=[]
for line in f:
line=line.strip()
if not line:
if sent:
count+=1
yield sent, comments
if maxsent!=0 and count>=maxsent:
break
sent=[]
comments=[]
elif line.startswith(u"#"):
if sent:
raise ValueError("Missing newline after sentence")
comments.append(line)
continue
else:
sent.append(line.split(u"\t"))
else:
if sent:
yield sent, comments
if isinstance(inp,basestring):
f.close() #Close it if you opened it
langre=re.compile(ur"# db-name: /home/ginter/UD/.*?/UD_(.*?)/[^/]+.db$")
def get_lang(comments):
"""Given the conllu comments, get the lang name"""
for c in comments:
match=langre.match(c)
if match:
return match.group(1)
return None
hitre=re.compile(ur"# visual-style\t([0-9]+)\tbgColor:lightgreen$")
def get_hit_indices(comments):
results=[]
for c in comments:
match=hitre.match(c)
if match:
results.append(int(match.group(1))-1)
return results
def searchurl(l,q):
qstr=urllib.urlencode({"db":l+"-dev","search":q})
return "http://bionlp-www.utu.fi/dep_search/query?%s"%qstr
def searchlink(l,q,txt):
return '<a href="%s">%s</a>'%(searchurl(l,q),str(txt))
class LangStat:
@classmethod
def from_list(cls,data):
"""Makes a new LangStat from data which is loaded from json,
which is basically a list of lang,hits,poshits"""
lang,hits,poshits=data
newLS=cls(lang)
newLS.hits=hits
newLS.poshits=poshits
return newLS
def to_list(self):
return self.lang,self.hits,self.poshits
def __init__(self,l):
self.lang=l
self.hits=0
self.poshits={}
def hit(self,pos):
self.hits+=1
self.poshits[pos]=self.poshits.get(pos,0)+1
def hit_table(langs,q):
#langs: {lang -> stats} --- this is the value of stats_cache for a given test
allpos=sorted(set(itertools.chain(*(stats.poshits.iterkeys() for stats in langs.itervalues()))))
print >> out8, '<table>'
print >> out8, u'<tr><th/>',u"".join(u"<th>"+p+u"</th>" for p in allpos), u"</tr>"
for l in sorted(langs):
if not any(langs[l].poshits.get(p) for p in allpos):
continue
print >> out8, u'<tr><td>%s</td>'%l
for p in allpos:
print >> out8, u'<td>%s</td>'%(searchlink(l,q.replace(u"_",p,1),langs[l].poshits.get(p,u" ")))
print >> out8, u'</tr>'
print >> out8, '</table>'
ID,FORM,LEMMA,UPOS,XPOS,FEAT,HEAD,DEPREL,DEPS,MISC=range(10)
def run_test(lang,expr):
global args
"""Runs one test on one language. Language is given with UD_ and
is really a repo name. Returns a LangStat()"""
cmd="python %s/query.py --max 10000000000 -d '%s/%s/*.db' '%s'"%(args.dep_search,os.path.abspath(args.ud_data),lang,expr)
p=subprocess.Popen(cmd,stdin=None,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
out,err=p.communicate()
ls=LangStat(lang)
for sent,comments in read_conll(cStringIO.StringIO(out),0):
hits=get_hit_indices(comments)
assert hits
for h in hits:
ls.hit(sent[h][UPOS])
return ls
def load_test_cache(f_name):
"""Loads all test results from f_name (json) and returns
a dictionary {expression -> {lang -> LangStat()}}"""
if not os.path.exists(f_name):
return {}
try:
with open(f_name,"r") as f:
d_tmp=json.load(f)
for test,langdict in d_tmp.iteritems():
for lang in langdict.keys():
langdict[lang]=LangStat.from_list(langdict[lang])
return d_tmp
except:
traceback.print_exc()
return {}
def save_test_cache(d,f_name):
"""Opposite of load_test_cache"""
newD={}
for test,langdict in d.iteritems():
newD[test]=dict(((lang,stats.to_list()) for lang,stats in langdict.iteritems()))
with open(f_name,"w") as f:
d=json.dump(newD,f)
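# --- Editor's note (illustrative, values made up): on disk the cache is plain JSON shaped as
# {expression -> {lang -> LangStat.to_list()}}, e.g.
# {"_ <nsubj _": {"UD_Finnish": ["UD_Finnish", 12, {"NOUN": 7, "VERB": 5}]}}
# load_test_cache() turns the inner lists back into LangStat objects.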
def main(tests,test_cache,languages):
"""tests is the stuff you get from yaml
test_cache is the test result dictionary"""
for t in tests:
#Make sure you have this test for every language
for l in languages:
if l not in test_cache.get(t["expr"],{}):
test_cache.setdefault(t["expr"],{})[l]=run_test(l,t["expr"])
#Forget languages you don't need
todel=set(test_cache[t["expr"]])-set(languages)
for l in todel:
print >> sys.stderr, "Deleting", l
del test_cache[t["expr"]][l]
#remember: test_cache: {testexpression -> {language -> stats}}
print >> out8, "#", t["name"]
print >> out8
print >> out8, t["desc"]
print >> out8
print >> out8, u"Search expression: `"+t["expr"]+u"`"
if "pos_ex" in t:
print >> out8
print >> out8, u"Correct example:"
print >> out8
print >> out8, u"~~~ sdparse"
print >> out8
print >> out8, t["pos_ex"]
print >> out8
print >> out8, u"~~~"
print >> out8
if "neg_ex" in t:
print >> out8
print >> out8, u"Incorrect example:"
print >> out8
print >> out8, u"~~~ sdparse"
print >> out8
print >> out8, t["neg_ex"]
print >> out8
print >> out8, u"~~~"
print >> out8
print >> out8
print >> out8, u"<a href=\"" + t["link"] + u"\">Link to documentation</a>"
print >> out8
print >> out8, u'<div id="accordion" class="jquery-ui-accordion">'
print >> out8, u"<div>"
print >> out8, u'<span class="doublewidespan" style="padding-left:3em">%s</span>'%u"Hit overview"
print >> out8, u'<span class="widespan"> </span>'
print >> out8, u"</div>"
print >> out8, u'<div>'
if t["expr"].startswith(u"_"):
hit_table(test_cache[t["expr"]],t["expr"])
elif "expr-pos" in t:
hit_table(test_cache[t["expr"]],t["expr-pos"])
else:
print >> out8, u"Hits table not produced since the query does not start with the simple token spec '_'. Please add 'expr-pos' to the test which starts with '_' that will be substituted for the various POS in the links"
print >> out8, u'</div>'
for l in languages:
if test_cache[t["expr"]][l].hits == 0:
continue
print >> out8, u"<div>"
print >> out8, u'<span class="doublewidespan" style="padding-left:3em">%s</span>'%l
print >> out8, u'<span class="widespan">%d hits</span>'%test_cache[t["expr"]][l].hits
print >> out8, u"</div>"
print >> out8, u"<div>"
q=urllib.urlencode({"db":l,"search":t["expr"]})
print >> out8, '<a href="http://bionlp-www.utu.fi/dep_search/query?%s-dev">Go to search</a><p/>'%q
print >> out8, u"</div>"
print >> out8, u"</div>"
print >> out8
print >> out8
# break
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='generates the syntactic validation tables')
parser.add_argument('--ud-data', default="/home/ginter/UD/ud_dbs_12",help='Where is the UD data indexed by dep_search? (DIRECTORY)')
parser.add_argument('--rerun',default=None,help='Comma-separated list of languages whose tests should be wiped and re-run')
parser.add_argument('--test-cache',default="test_cache.json",help='Cache with test results. Will be created if not existing')
parser.add_argument('--empty-test-cache',default=False,action="store_true",help='Rerun all tests')
parser.add_argument('--dep-search', default="/home/ginter/dep_search",help='Where is the dep-search home? (DIRECTORY)')
parser.add_argument('--tests', default=os.path.join(THISDIR,"stests.yaml"),help='Yaml file with the tests')
args = parser.parse_args()
out8=codecs.getwriter("utf-8")(sys.stdout)
print >> out8, u"---"
print >> out8, u"layout: base"
print >> out8, u"title: 'Universal Dependencies --- Syntactic validation'"
print >> out8, u"udver: '2'"
print >> out8, u"---"
print >> out8
print >> out8, u'Regenerated <time class="timeago" datetime="%(zulu)sZ">%(zulu)s zulu</time>'%{'zulu':datetime.datetime.utcnow().replace(microsecond=0).isoformat()}
print >> out8
try:
languages=sorted(map(os.path.basename,glob.glob(os.path.join(args.ud_data,'UD_*'))))
test_cache=load_test_cache(args.test_cache)
if args.rerun is not None:
langs_to_wipe=args.rerun.split(",")
else:
langs_to_wipe=[]
all_langs=set(languages)
langs_to_wipe=set(langs_to_wipe)
#Now we want to forget cached tests for the --rerun languages
for _,lang_results in test_cache.iteritems():
to_wipe=(set(lang_results)-all_langs)|(langs_to_wipe&set(lang_results))
for lw in to_wipe:
del lang_results[lw]
with codecs.open(args.tests,"r","utf-8") as t:
tests=yaml.load(t)
main(tests,test_cache,languages)
save_test_cache(test_cache,args.test_cache)
except:
traceback.print_exc(file=sys.stdout)
|
84f3be0df2a38126d37cb686a22f0df3a3094e35
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/tests/npbench/misc/mandelbrot1_test.py
|
13ee414c340063817f4b2f8390e48c1040104978
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,845
|
py
|
mandelbrot1_test.py
|
# Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
# Original application code: NPBench - https://github.com/spcl/npbench
import dace.dtypes
import numpy as np
import dace as dc
import pytest
import argparse
from dace.fpga_testing import fpga_test, xilinx_test
from dace.transformation.interstate import FPGATransformSDFG, InlineSDFG
from dace.transformation.dataflow import StreamingMemory, StreamingComposition
from dace.transformation.auto.auto_optimize import auto_optimize, fpga_auto_opt
from dace.config import set_temporary
XN, YN, N = (dc.symbol(s, dtype=dc.int64) for s in ['XN', 'YN', 'N'])
@dc.program
def linspace(start: dc.float64, stop: dc.float64, X: dc.float64[N]):
dist = (stop - start) / (N - 1)
for i in dc.map[0:N]:
X[i] = start + i * dist
@dc.program
def mandelbrot_kernel(xmin: dc.float64, xmax: dc.float64, ymin: dc.float64, ymax: dc.float64, maxiter: dc.int64,
horizon: dc.float64):
# Adapted from https://www.ibm.com/developerworks/community/blogs/jfp/...
# .../entry/How_To_Compute_Mandelbrodt_Set_Quickly?lang=en
X = np.ndarray((XN, ), dtype=np.float64)
Y = np.ndarray((YN, ), dtype=np.float64)
linspace(xmin, xmax, X)
linspace(ymin, ymax, Y)
# C = X + np.reshape(Y, (YN, 1)) * 1j
C = np.ndarray((YN, XN), dtype=np.complex128)
for i, j in dc.map[0:YN, 0:XN]:
C[i, j] = X[j] + Y[i] * 1j
N = np.zeros(C.shape, dtype=np.int64)
Z = np.zeros(C.shape, dtype=np.complex128)
for n in range(maxiter):
I = np.less(np.absolute(Z), horizon)
N[I] = n
# np.positive(n, out=N, where=I)
# for j, k in dace.map[0:YN, 0:XN]:
# if I[j, k]:
# N[j, k] = n
# Z[I] = Z[I]**2 + C[I]
# np.add(np.power(Z, 2, where=I), C, out=Z, where=I)
for j, k in dc.map[0:YN, 0:XN]:
if I[j, k]:
Z[j, k] = Z[j, k]**2 + C[j, k]
N[N == maxiter - 1] = 0
# np.positive(0, out=N, where=N==maxiter-1)
# for j, k in dace.map[0:YN, 0:XN]:
# if N[j, k] == maxiter-1:
# N[j, k] = 0
return Z, N
def ground_truth(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon=2.0):
# Adapted from https://www.ibm.com/developerworks/community/blogs/jfp/...
# .../entry/How_To_Compute_Mandelbrodt_Set_Quickly?lang=en
X = np.linspace(xmin, xmax, xn, dtype=np.float64)
Y = np.linspace(ymin, ymax, yn, dtype=np.float64)
C = X + Y[:, None] * 1j
N = np.zeros(C.shape, dtype=np.int64)
Z = np.zeros(C.shape, dtype=np.complex128)
for n in range(maxiter):
I = np.less(abs(Z), horizon)
N[I] = n
Z[I] = Z[I]**2 + C[I]
N[N == maxiter - 1] = 0
return Z, N
def run_mandelbrot1(device_type: dace.dtypes.DeviceType):
'''
Runs mandelbrot1 for the given device
:return: the SDFG
'''
# Initialize data (npbench small size)
xmin, xmax, XN, ymin, ymax, YN, maxiter, horizon = -1.75, 0.25, 125, -1.00, 1.00, 125, 60, 2.0
if device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}:
# Parse the SDFG and apply auto-opt
sdfg = mandelbrot_kernel.to_sdfg()
sdfg = auto_optimize(sdfg, device_type)
Z, N = sdfg(xmin, xmax, ymin, ymax, maxiter, horizon, XN=XN, YN=YN)
elif device_type == dace.dtypes.DeviceType.FPGA:
# Parse SDFG and apply FPGA friendly optimization
sdfg = mandelbrot_kernel.to_sdfg(simplify=True)
applied = sdfg.apply_transformations([FPGATransformSDFG])
assert applied == 1
sdfg.apply_transformations_repeated([InlineSDFG], print_report=True)
sdfg.specialize(dict(XN=XN, YN=YN))
Z, N = sdfg(xmin, xmax, ymin, ymax, maxiter, horizon)
# Compute ground truth and validate
    Z_ref, N_ref = ground_truth(xmin, xmax, ymin, ymax, XN, YN, maxiter)
assert np.allclose(Z, Z_ref)
assert np.allclose(N, N_ref)
return sdfg
@pytest.mark.skip(reason="Parsing error")
def test_cpu():
run_mandelbrot1(dace.dtypes.DeviceType.CPU)
@pytest.mark.skip(reason="Parsing error")
@pytest.mark.gpu
def test_gpu():
run_mandelbrot1(dace.dtypes.DeviceType.GPU)
@pytest.mark.skip(reason="Parsing error")
@fpga_test(assert_ii_1=False)
def test_fpga():
return run_mandelbrot1(dace.dtypes.DeviceType.FPGA)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--target", default='cpu', choices=['cpu', 'gpu', 'fpga'], help='Target platform')
args = vars(parser.parse_args())
target = args["target"]
if target == "cpu":
run_mandelbrot1(dace.dtypes.DeviceType.CPU)
elif target == "gpu":
run_mandelbrot1(dace.dtypes.DeviceType.GPU)
elif target == "fpga":
run_mandelbrot1(dace.dtypes.DeviceType.FPGA)
|
aa088d1042acbcbeeabbefe58b0533aee56449d0
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/20_杂题/牛客编程巅峰赛/69_树节点价值和-层序等差数列.py
|
5938e5348a031cec3229689a73c7abae619616d8
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
69_树节点价值和-层序等差数列.py
|
#
# The class names, method names and parameter names are fixed; do not modify them, just return the value the method specifies.
#
# @param n long: the number of nodes in a standard complete binary tree
# @return long
#
from math import floor, log2
MOD = 998244353
# n<=10^9
class Solution:
    # Adding node values one by one would certainly time out
def tree4TLE(self, n: int) -> int:
# write code here
self.res = 0
def dfs(root: int, depth: int):
self.res += depth * root
self.res %= MOD
if root << 1 <= n:
dfs(root << 1, depth + 1)
if (root << 1 | 1) <= n:
dfs(root << 1 | 1, depth + 1)
dfs(1, 1)
return self.res % MOD
    # Count level by level using an arithmetic series
def tree4(self, n: int) -> int:
res = 0
TREE_DEPTH = floor(log2(n)) + 1
depth, left, right = 1, 1, 1
while depth <= TREE_DEPTH:
res += depth * (left + right) * (right - left + 1) // 2
res %= MOD
depth += 1
left, right = min(n, left << 1), min(n, right << 1 | 1)
return res % MOD
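# --- Editor's worked example (illustrative): for n = 6, level 1 holds node 1, level 2 holds
# nodes 2..3 and level 3 holds nodes 4..6 (clamped by n), so the answer is
# 1*1 + 2*(2+3) + 3*(4+5+6) = 1 + 10 + 45 = 56, which is what tree4(6) returns via the
# per-level arithmetic series depth * (left + right) * (right - left + 1) // 2.
def _example_tree4():
    assert Solution().tree4(6) == 56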
|
cb26b2ef7cb6179e52c0f737586c93cc11ef8b0b
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/dms/outputs.py
|
33750b0297bc88701b98f55fda6fb1a8e329877a
|
[
"BSD-3-Clause",
"Apache-2.0",
"MPL-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 93,866
|
py
|
outputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'EndpointElasticsearchSettings',
'EndpointKafkaSettings',
'EndpointKinesisSettings',
'EndpointMongodbSettings',
'EndpointRedisSettings',
'EndpointRedshiftSettings',
'EndpointS3Settings',
'GetEndpointElasticsearchSettingResult',
'GetEndpointKafkaSettingResult',
'GetEndpointKinesisSettingResult',
'GetEndpointMongodbSettingResult',
'GetEndpointRedisSettingResult',
'GetEndpointRedshiftSettingResult',
'GetEndpointS3SettingResult',
]
@pulumi.output_type
class EndpointElasticsearchSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endpointUri":
suggest = "endpoint_uri"
elif key == "serviceAccessRoleArn":
suggest = "service_access_role_arn"
elif key == "errorRetryDuration":
suggest = "error_retry_duration"
elif key == "fullLoadErrorPercentage":
suggest = "full_load_error_percentage"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointElasticsearchSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointElasticsearchSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointElasticsearchSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint_uri: str,
service_access_role_arn: str,
error_retry_duration: Optional[int] = None,
full_load_error_percentage: Optional[int] = None):
"""
:param str endpoint_uri: Endpoint for the OpenSearch cluster.
:param str service_access_role_arn: ARN of the IAM Role with permissions to write to the OpenSearch cluster.
:param int error_retry_duration: Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is `300`.
:param int full_load_error_percentage: Maximum percentage of records that can fail to be written before a full load operation stops. Default is `10`.
"""
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
if error_retry_duration is not None:
pulumi.set(__self__, "error_retry_duration", error_retry_duration)
if full_load_error_percentage is not None:
pulumi.set(__self__, "full_load_error_percentage", full_load_error_percentage)
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> str:
"""
Endpoint for the OpenSearch cluster.
"""
return pulumi.get(self, "endpoint_uri")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> str:
"""
ARN of the IAM Role with permissions to write to the OpenSearch cluster.
"""
return pulumi.get(self, "service_access_role_arn")
@property
@pulumi.getter(name="errorRetryDuration")
def error_retry_duration(self) -> Optional[int]:
"""
Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is `300`.
"""
return pulumi.get(self, "error_retry_duration")
@property
@pulumi.getter(name="fullLoadErrorPercentage")
def full_load_error_percentage(self) -> Optional[int]:
"""
Maximum percentage of records that can fail to be written before a full load operation stops. Default is `10`.
"""
return pulumi.get(self, "full_load_error_percentage")
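# --- Editor's note (illustrative, hypothetical values): a fully-populated settings object
# would carry values such as endpoint_uri="https://search-example.eu-west-1.es.amazonaws.com"
# and service_access_role_arn="arn:aws:iam::123456789012:role/dms-es-access", with
# error_retry_duration and full_load_error_percentage falling back to the documented
# defaults of 300 and 10 when not supplied.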
@pulumi.output_type
class EndpointKafkaSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "includeControlDetails":
suggest = "include_control_details"
elif key == "includeNullAndEmpty":
suggest = "include_null_and_empty"
elif key == "includePartitionValue":
suggest = "include_partition_value"
elif key == "includeTableAlterOperations":
suggest = "include_table_alter_operations"
elif key == "includeTransactionDetails":
suggest = "include_transaction_details"
elif key == "messageFormat":
suggest = "message_format"
elif key == "messageMaxBytes":
suggest = "message_max_bytes"
elif key == "noHexPrefix":
suggest = "no_hex_prefix"
elif key == "partitionIncludeSchemaTable":
suggest = "partition_include_schema_table"
elif key == "saslPassword":
suggest = "sasl_password"
elif key == "saslUsername":
suggest = "sasl_username"
elif key == "securityProtocol":
suggest = "security_protocol"
elif key == "sslCaCertificateArn":
suggest = "ssl_ca_certificate_arn"
elif key == "sslClientCertificateArn":
suggest = "ssl_client_certificate_arn"
elif key == "sslClientKeyArn":
suggest = "ssl_client_key_arn"
elif key == "sslClientKeyPassword":
suggest = "ssl_client_key_password"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointKafkaSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointKafkaSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointKafkaSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
broker: str,
include_control_details: Optional[bool] = None,
include_null_and_empty: Optional[bool] = None,
include_partition_value: Optional[bool] = None,
include_table_alter_operations: Optional[bool] = None,
include_transaction_details: Optional[bool] = None,
message_format: Optional[str] = None,
message_max_bytes: Optional[int] = None,
no_hex_prefix: Optional[bool] = None,
partition_include_schema_table: Optional[bool] = None,
sasl_password: Optional[str] = None,
sasl_username: Optional[str] = None,
security_protocol: Optional[str] = None,
ssl_ca_certificate_arn: Optional[str] = None,
ssl_client_certificate_arn: Optional[str] = None,
ssl_client_key_arn: Optional[str] = None,
ssl_client_key_password: Optional[str] = None,
topic: Optional[str] = None):
"""
:param str broker: Kafka broker location. Specify in the form broker-hostname-or-ip:port.
:param bool include_control_details: Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is `false`.
:param bool include_null_and_empty: Include NULL and empty columns for records migrated to the endpoint. Default is `false`.
:param bool include_partition_value: Shows the partition value within the Kafka message output unless the partition type is `schema-table-type`. Default is `false`.
:param bool include_table_alter_operations: Includes any data definition language (DDL) operations that change the table in the control data, such as `rename-table`, `drop-table`, `add-column`, `drop-column`, and `rename-column`. Default is `false`.
:param bool include_transaction_details: Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for `transaction_id`, previous `transaction_id`, and `transaction_record_id` (the record offset within a transaction). Default is `false`.
:param str message_format: Output format for the records created on the endpoint. Message format is `JSON` (default) or `JSON_UNFORMATTED` (a single line with no tab).
        :param int message_max_bytes: Maximum size in bytes for records created on the endpoint. Default is `1,000,000`.
:param bool no_hex_prefix: Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the `no_hex_prefix` endpoint setting to enable migration of RAW data type columns without adding the `'0x'` prefix.
:param bool partition_include_schema_table: Prefixes schema and table names to partition values, when the partition type is `primary-key-type`. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is `false`.
:param str sasl_password: Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
:param str sasl_username: Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
:param str security_protocol: Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include `ssl-encryption`, `ssl-authentication`, and `sasl-ssl`. `sasl-ssl` requires `sasl_username` and `sasl_password`.
:param str ssl_ca_certificate_arn: ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
:param str ssl_client_certificate_arn: ARN of the client certificate used to securely connect to a Kafka target endpoint.
:param str ssl_client_key_arn: ARN for the client private key used to securely connect to a Kafka target endpoint.
:param str ssl_client_key_password: Password for the client private key used to securely connect to a Kafka target endpoint.
:param str topic: Kafka topic for migration. Default is `kafka-default-topic`.
"""
pulumi.set(__self__, "broker", broker)
if include_control_details is not None:
pulumi.set(__self__, "include_control_details", include_control_details)
if include_null_and_empty is not None:
pulumi.set(__self__, "include_null_and_empty", include_null_and_empty)
if include_partition_value is not None:
pulumi.set(__self__, "include_partition_value", include_partition_value)
if include_table_alter_operations is not None:
pulumi.set(__self__, "include_table_alter_operations", include_table_alter_operations)
if include_transaction_details is not None:
pulumi.set(__self__, "include_transaction_details", include_transaction_details)
if message_format is not None:
pulumi.set(__self__, "message_format", message_format)
if message_max_bytes is not None:
pulumi.set(__self__, "message_max_bytes", message_max_bytes)
if no_hex_prefix is not None:
pulumi.set(__self__, "no_hex_prefix", no_hex_prefix)
if partition_include_schema_table is not None:
pulumi.set(__self__, "partition_include_schema_table", partition_include_schema_table)
if sasl_password is not None:
pulumi.set(__self__, "sasl_password", sasl_password)
if sasl_username is not None:
pulumi.set(__self__, "sasl_username", sasl_username)
if security_protocol is not None:
pulumi.set(__self__, "security_protocol", security_protocol)
if ssl_ca_certificate_arn is not None:
pulumi.set(__self__, "ssl_ca_certificate_arn", ssl_ca_certificate_arn)
if ssl_client_certificate_arn is not None:
pulumi.set(__self__, "ssl_client_certificate_arn", ssl_client_certificate_arn)
if ssl_client_key_arn is not None:
pulumi.set(__self__, "ssl_client_key_arn", ssl_client_key_arn)
if ssl_client_key_password is not None:
pulumi.set(__self__, "ssl_client_key_password", ssl_client_key_password)
if topic is not None:
pulumi.set(__self__, "topic", topic)
@property
@pulumi.getter
def broker(self) -> str:
"""
Kafka broker location. Specify in the form broker-hostname-or-ip:port.
"""
return pulumi.get(self, "broker")
@property
@pulumi.getter(name="includeControlDetails")
def include_control_details(self) -> Optional[bool]:
"""
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is `false`.
"""
return pulumi.get(self, "include_control_details")
@property
@pulumi.getter(name="includeNullAndEmpty")
def include_null_and_empty(self) -> Optional[bool]:
"""
Include NULL and empty columns for records migrated to the endpoint. Default is `false`.
"""
return pulumi.get(self, "include_null_and_empty")
@property
@pulumi.getter(name="includePartitionValue")
def include_partition_value(self) -> Optional[bool]:
"""
Shows the partition value within the Kafka message output unless the partition type is `schema-table-type`. Default is `false`.
"""
return pulumi.get(self, "include_partition_value")
@property
@pulumi.getter(name="includeTableAlterOperations")
def include_table_alter_operations(self) -> Optional[bool]:
"""
Includes any data definition language (DDL) operations that change the table in the control data, such as `rename-table`, `drop-table`, `add-column`, `drop-column`, and `rename-column`. Default is `false`.
"""
return pulumi.get(self, "include_table_alter_operations")
@property
@pulumi.getter(name="includeTransactionDetails")
def include_transaction_details(self) -> Optional[bool]:
"""
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for `transaction_id`, previous `transaction_id`, and `transaction_record_id` (the record offset within a transaction). Default is `false`.
"""
return pulumi.get(self, "include_transaction_details")
@property
@pulumi.getter(name="messageFormat")
def message_format(self) -> Optional[str]:
"""
Output format for the records created on the endpoint. Message format is `JSON` (default) or `JSON_UNFORMATTED` (a single line with no tab).
"""
return pulumi.get(self, "message_format")
@property
@pulumi.getter(name="messageMaxBytes")
def message_max_bytes(self) -> Optional[int]:
"""
Maximum size in bytes for records created on the endpoint. Default is `1,000,000`.
"""
return pulumi.get(self, "message_max_bytes")
@property
@pulumi.getter(name="noHexPrefix")
def no_hex_prefix(self) -> Optional[bool]:
"""
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the `no_hex_prefix` endpoint setting to enable migration of RAW data type columns without adding the `'0x'` prefix.
"""
return pulumi.get(self, "no_hex_prefix")
@property
@pulumi.getter(name="partitionIncludeSchemaTable")
def partition_include_schema_table(self) -> Optional[bool]:
"""
Prefixes schema and table names to partition values, when the partition type is `primary-key-type`. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is `false`.
"""
return pulumi.get(self, "partition_include_schema_table")
@property
@pulumi.getter(name="saslPassword")
def sasl_password(self) -> Optional[str]:
"""
Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
"""
return pulumi.get(self, "sasl_password")
@property
@pulumi.getter(name="saslUsername")
def sasl_username(self) -> Optional[str]:
"""
Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
"""
return pulumi.get(self, "sasl_username")
@property
@pulumi.getter(name="securityProtocol")
def security_protocol(self) -> Optional[str]:
"""
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include `ssl-encryption`, `ssl-authentication`, and `sasl-ssl`. `sasl-ssl` requires `sasl_username` and `sasl_password`.
"""
return pulumi.get(self, "security_protocol")
@property
@pulumi.getter(name="sslCaCertificateArn")
def ssl_ca_certificate_arn(self) -> Optional[str]:
"""
ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
"""
return pulumi.get(self, "ssl_ca_certificate_arn")
@property
@pulumi.getter(name="sslClientCertificateArn")
def ssl_client_certificate_arn(self) -> Optional[str]:
"""
ARN of the client certificate used to securely connect to a Kafka target endpoint.
"""
return pulumi.get(self, "ssl_client_certificate_arn")
@property
@pulumi.getter(name="sslClientKeyArn")
def ssl_client_key_arn(self) -> Optional[str]:
"""
ARN for the client private key used to securely connect to a Kafka target endpoint.
"""
return pulumi.get(self, "ssl_client_key_arn")
@property
@pulumi.getter(name="sslClientKeyPassword")
def ssl_client_key_password(self) -> Optional[str]:
"""
Password for the client private key used to securely connect to a Kafka target endpoint.
"""
return pulumi.get(self, "ssl_client_key_password")
@property
@pulumi.getter
def topic(self) -> Optional[str]:
"""
Kafka topic for migration. Default is `kafka-default-topic`.
"""
return pulumi.get(self, "topic")
@pulumi.output_type
class EndpointKinesisSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "includeControlDetails":
suggest = "include_control_details"
elif key == "includeNullAndEmpty":
suggest = "include_null_and_empty"
elif key == "includePartitionValue":
suggest = "include_partition_value"
elif key == "includeTableAlterOperations":
suggest = "include_table_alter_operations"
elif key == "includeTransactionDetails":
suggest = "include_transaction_details"
elif key == "messageFormat":
suggest = "message_format"
elif key == "partitionIncludeSchemaTable":
suggest = "partition_include_schema_table"
elif key == "serviceAccessRoleArn":
suggest = "service_access_role_arn"
elif key == "streamArn":
suggest = "stream_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointKinesisSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointKinesisSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointKinesisSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
include_control_details: Optional[bool] = None,
include_null_and_empty: Optional[bool] = None,
include_partition_value: Optional[bool] = None,
include_table_alter_operations: Optional[bool] = None,
include_transaction_details: Optional[bool] = None,
message_format: Optional[str] = None,
partition_include_schema_table: Optional[bool] = None,
service_access_role_arn: Optional[str] = None,
stream_arn: Optional[str] = None):
"""
:param bool include_control_details: Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is `false`.
:param bool include_null_and_empty: Include NULL and empty columns in the target. Default is `false`.
:param bool include_partition_value: Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is `false`.
:param bool include_table_alter_operations: Includes any data definition language (DDL) operations that change the table in the control data. Default is `false`.
:param bool include_transaction_details: Provides detailed transaction information from the source database. Default is `false`.
:param str message_format: Output format for the records created. Default is `json`. Valid values are `json` and `json-unformatted` (a single line with no tab).
:param bool partition_include_schema_table: Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is `false`.
:param str service_access_role_arn: ARN of the IAM Role with permissions to write to the Kinesis data stream.
:param str stream_arn: ARN of the Kinesis data stream.
"""
if include_control_details is not None:
pulumi.set(__self__, "include_control_details", include_control_details)
if include_null_and_empty is not None:
pulumi.set(__self__, "include_null_and_empty", include_null_and_empty)
if include_partition_value is not None:
pulumi.set(__self__, "include_partition_value", include_partition_value)
if include_table_alter_operations is not None:
pulumi.set(__self__, "include_table_alter_operations", include_table_alter_operations)
if include_transaction_details is not None:
pulumi.set(__self__, "include_transaction_details", include_transaction_details)
if message_format is not None:
pulumi.set(__self__, "message_format", message_format)
if partition_include_schema_table is not None:
pulumi.set(__self__, "partition_include_schema_table", partition_include_schema_table)
if service_access_role_arn is not None:
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
if stream_arn is not None:
pulumi.set(__self__, "stream_arn", stream_arn)
@property
@pulumi.getter(name="includeControlDetails")
def include_control_details(self) -> Optional[bool]:
"""
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is `false`.
"""
return pulumi.get(self, "include_control_details")
@property
@pulumi.getter(name="includeNullAndEmpty")
def include_null_and_empty(self) -> Optional[bool]:
"""
Include NULL and empty columns in the target. Default is `false`.
"""
return pulumi.get(self, "include_null_and_empty")
@property
@pulumi.getter(name="includePartitionValue")
def include_partition_value(self) -> Optional[bool]:
"""
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is `false`.
"""
return pulumi.get(self, "include_partition_value")
@property
@pulumi.getter(name="includeTableAlterOperations")
def include_table_alter_operations(self) -> Optional[bool]:
"""
Includes any data definition language (DDL) operations that change the table in the control data. Default is `false`.
"""
return pulumi.get(self, "include_table_alter_operations")
@property
@pulumi.getter(name="includeTransactionDetails")
def include_transaction_details(self) -> Optional[bool]:
"""
Provides detailed transaction information from the source database. Default is `false`.
"""
return pulumi.get(self, "include_transaction_details")
@property
@pulumi.getter(name="messageFormat")
def message_format(self) -> Optional[str]:
"""
Output format for the records created. Default is `json`. Valid values are `json` and `json-unformatted` (a single line with no tab).
"""
return pulumi.get(self, "message_format")
@property
@pulumi.getter(name="partitionIncludeSchemaTable")
def partition_include_schema_table(self) -> Optional[bool]:
"""
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is `false`.
"""
return pulumi.get(self, "partition_include_schema_table")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> Optional[str]:
"""
ARN of the IAM Role with permissions to write to the Kinesis data stream.
"""
return pulumi.get(self, "service_access_role_arn")
@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> Optional[str]:
"""
ARN of the Kinesis data stream.
"""
return pulumi.get(self, "stream_arn")
@pulumi.output_type
class EndpointMongodbSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authMechanism":
suggest = "auth_mechanism"
elif key == "authSource":
suggest = "auth_source"
elif key == "authType":
suggest = "auth_type"
elif key == "docsToInvestigate":
suggest = "docs_to_investigate"
elif key == "extractDocId":
suggest = "extract_doc_id"
elif key == "nestingLevel":
suggest = "nesting_level"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointMongodbSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointMongodbSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointMongodbSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
auth_mechanism: Optional[str] = None,
auth_source: Optional[str] = None,
auth_type: Optional[str] = None,
docs_to_investigate: Optional[str] = None,
extract_doc_id: Optional[str] = None,
nesting_level: Optional[str] = None):
"""
:param str auth_mechanism: Authentication mechanism to access the MongoDB source endpoint. Default is `default`.
:param str auth_source: Authentication database name. Not used when `auth_type` is `no`. Default is `admin`.
:param str auth_type: Authentication type to access the MongoDB source endpoint. Default is `password`.
:param str docs_to_investigate: Number of documents to preview to determine the document organization. Use this setting when `nesting_level` is set to `one`. Default is `1000`.
:param str extract_doc_id: Whether to extract the document ID. Use this setting when `nesting_level` is set to `none`. Default is `false`.
:param str nesting_level: Specifies either document or table mode. Default is `none`. Valid values are `one` (table mode) and `none` (document mode).
"""
if auth_mechanism is not None:
pulumi.set(__self__, "auth_mechanism", auth_mechanism)
if auth_source is not None:
pulumi.set(__self__, "auth_source", auth_source)
if auth_type is not None:
pulumi.set(__self__, "auth_type", auth_type)
if docs_to_investigate is not None:
pulumi.set(__self__, "docs_to_investigate", docs_to_investigate)
if extract_doc_id is not None:
pulumi.set(__self__, "extract_doc_id", extract_doc_id)
if nesting_level is not None:
pulumi.set(__self__, "nesting_level", nesting_level)
@property
@pulumi.getter(name="authMechanism")
def auth_mechanism(self) -> Optional[str]:
"""
Authentication mechanism to access the MongoDB source endpoint. Default is `default`.
"""
return pulumi.get(self, "auth_mechanism")
@property
@pulumi.getter(name="authSource")
def auth_source(self) -> Optional[str]:
"""
Authentication database name. Not used when `auth_type` is `no`. Default is `admin`.
"""
return pulumi.get(self, "auth_source")
@property
@pulumi.getter(name="authType")
def auth_type(self) -> Optional[str]:
"""
Authentication type to access the MongoDB source endpoint. Default is `password`.
"""
return pulumi.get(self, "auth_type")
@property
@pulumi.getter(name="docsToInvestigate")
def docs_to_investigate(self) -> Optional[str]:
"""
Number of documents to preview to determine the document organization. Use this setting when `nesting_level` is set to `one`. Default is `1000`.
"""
return pulumi.get(self, "docs_to_investigate")
@property
@pulumi.getter(name="extractDocId")
def extract_doc_id(self) -> Optional[str]:
"""
Whether to extract the document ID. Use this setting when `nesting_level` is set to `none`. Default is `false`.
"""
return pulumi.get(self, "extract_doc_id")
@property
@pulumi.getter(name="nestingLevel")
def nesting_level(self) -> Optional[str]:
"""
Specifies either document or table mode. Default is `none`. Valid values are `one` (table mode) and `none` (document mode).
"""
return pulumi.get(self, "nesting_level")
@pulumi.output_type
class EndpointRedisSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authType":
suggest = "auth_type"
elif key == "serverName":
suggest = "server_name"
elif key == "authPassword":
suggest = "auth_password"
elif key == "authUserName":
suggest = "auth_user_name"
elif key == "sslCaCertificateArn":
suggest = "ssl_ca_certificate_arn"
elif key == "sslSecurityProtocol":
suggest = "ssl_security_protocol"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointRedisSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointRedisSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointRedisSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
auth_type: str,
port: int,
server_name: str,
auth_password: Optional[str] = None,
auth_user_name: Optional[str] = None,
ssl_ca_certificate_arn: Optional[str] = None,
ssl_security_protocol: Optional[str] = None):
"""
:param str auth_type: The type of authentication to perform when connecting to a Redis target. Options include `none`, `auth-token`, and `auth-role`. The `auth-token` option requires an `auth_password` value to be provided. The `auth-role` option requires `auth_user_name` and `auth_password` values to be provided.
:param int port: Transmission Control Protocol (TCP) port for the endpoint.
:param str server_name: Fully qualified domain name of the endpoint.
:param str auth_password: The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
:param str auth_user_name: The username provided with the `auth-role` option of the AuthType setting for a Redis target endpoint.
:param str ssl_ca_certificate_arn: The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
:param str ssl_security_protocol: The connection security protocol for traffic between the endpoint and the Redis database. Options include `plaintext` and `ssl-encryption`; the `plaintext` option doesn't provide Transport Layer Security (TLS) encryption. The default is `ssl-encryption`.
"""
pulumi.set(__self__, "auth_type", auth_type)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "server_name", server_name)
if auth_password is not None:
pulumi.set(__self__, "auth_password", auth_password)
if auth_user_name is not None:
pulumi.set(__self__, "auth_user_name", auth_user_name)
if ssl_ca_certificate_arn is not None:
pulumi.set(__self__, "ssl_ca_certificate_arn", ssl_ca_certificate_arn)
if ssl_security_protocol is not None:
pulumi.set(__self__, "ssl_security_protocol", ssl_security_protocol)
@property
@pulumi.getter(name="authType")
def auth_type(self) -> str:
"""
The type of authentication to perform when connecting to a Redis target. Options include `none`, `auth-token`, and `auth-role`. The `auth-token` option requires an `auth_password` value to be provided. The `auth-role` option requires `auth_user_name` and `auth_password` values to be provided.
"""
return pulumi.get(self, "auth_type")
@property
@pulumi.getter
def port(self) -> int:
"""
Transmission Control Protocol (TCP) port for the endpoint.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> str:
"""
Fully qualified domain name of the endpoint.
"""
return pulumi.get(self, "server_name")
@property
@pulumi.getter(name="authPassword")
def auth_password(self) -> Optional[str]:
"""
The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
"""
return pulumi.get(self, "auth_password")
@property
@pulumi.getter(name="authUserName")
def auth_user_name(self) -> Optional[str]:
"""
The username provided with the `auth-role` option of the AuthType setting for a Redis target endpoint.
"""
return pulumi.get(self, "auth_user_name")
@property
@pulumi.getter(name="sslCaCertificateArn")
def ssl_ca_certificate_arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
"""
return pulumi.get(self, "ssl_ca_certificate_arn")
@property
@pulumi.getter(name="sslSecurityProtocol")
def ssl_security_protocol(self) -> Optional[str]:
"""
The connection security protocol for traffic between the endpoint and the Redis database. Options include `plaintext` and `ssl-encryption`; the `plaintext` option doesn't provide Transport Layer Security (TLS) encryption. The default is `ssl-encryption`.
"""
return pulumi.get(self, "ssl_security_protocol")
@pulumi.output_type
class EndpointRedshiftSettings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bucketFolder":
suggest = "bucket_folder"
elif key == "bucketName":
suggest = "bucket_name"
elif key == "encryptionMode":
suggest = "encryption_mode"
elif key == "serverSideEncryptionKmsKeyId":
suggest = "server_side_encryption_kms_key_id"
elif key == "serviceAccessRoleArn":
suggest = "service_access_role_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointRedshiftSettings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointRedshiftSettings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointRedshiftSettings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket_folder: Optional[str] = None,
bucket_name: Optional[str] = None,
encryption_mode: Optional[str] = None,
server_side_encryption_kms_key_id: Optional[str] = None,
service_access_role_arn: Optional[str] = None):
"""
:param str bucket_folder: Custom S3 Bucket Object prefix for intermediate storage.
:param str bucket_name: Custom S3 Bucket name for intermediate storage.
:param str encryption_mode: The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to `SSE_S3`. Valid values are `SSE_S3` and `SSE_KMS`.
:param str server_side_encryption_kms_key_id: ARN or Id of KMS Key to use when `encryption_mode` is `SSE_KMS`.
:param str service_access_role_arn: Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
"""
if bucket_folder is not None:
pulumi.set(__self__, "bucket_folder", bucket_folder)
if bucket_name is not None:
pulumi.set(__self__, "bucket_name", bucket_name)
if encryption_mode is not None:
pulumi.set(__self__, "encryption_mode", encryption_mode)
if server_side_encryption_kms_key_id is not None:
pulumi.set(__self__, "server_side_encryption_kms_key_id", server_side_encryption_kms_key_id)
if service_access_role_arn is not None:
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
@property
@pulumi.getter(name="bucketFolder")
def bucket_folder(self) -> Optional[str]:
"""
Custom S3 Bucket Object prefix for intermediate storage.
"""
return pulumi.get(self, "bucket_folder")
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> Optional[str]:
"""
Custom S3 Bucket name for intermediate storage.
"""
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter(name="encryptionMode")
def encryption_mode(self) -> Optional[str]:
"""
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to `SSE_S3`. Valid values are `SSE_S3` and `SSE_KMS`.
"""
return pulumi.get(self, "encryption_mode")
@property
@pulumi.getter(name="serverSideEncryptionKmsKeyId")
def server_side_encryption_kms_key_id(self) -> Optional[str]:
"""
ARN or Id of KMS Key to use when `encryption_mode` is `SSE_KMS`.
"""
return pulumi.get(self, "server_side_encryption_kms_key_id")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> Optional[str]:
"""
Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
"""
return pulumi.get(self, "service_access_role_arn")
@pulumi.output_type
class EndpointS3Settings(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "addColumnName":
suggest = "add_column_name"
elif key == "bucketFolder":
suggest = "bucket_folder"
elif key == "bucketName":
suggest = "bucket_name"
elif key == "cannedAclForObjects":
suggest = "canned_acl_for_objects"
elif key == "cdcInsertsAndUpdates":
suggest = "cdc_inserts_and_updates"
elif key == "cdcInsertsOnly":
suggest = "cdc_inserts_only"
elif key == "cdcMaxBatchInterval":
suggest = "cdc_max_batch_interval"
elif key == "cdcMinFileSize":
suggest = "cdc_min_file_size"
elif key == "cdcPath":
suggest = "cdc_path"
elif key == "compressionType":
suggest = "compression_type"
elif key == "csvDelimiter":
suggest = "csv_delimiter"
elif key == "csvNoSupValue":
suggest = "csv_no_sup_value"
elif key == "csvNullValue":
suggest = "csv_null_value"
elif key == "csvRowDelimiter":
suggest = "csv_row_delimiter"
elif key == "dataFormat":
suggest = "data_format"
elif key == "dataPageSize":
suggest = "data_page_size"
elif key == "datePartitionDelimiter":
suggest = "date_partition_delimiter"
elif key == "datePartitionEnabled":
suggest = "date_partition_enabled"
elif key == "datePartitionSequence":
suggest = "date_partition_sequence"
elif key == "dictPageSizeLimit":
suggest = "dict_page_size_limit"
elif key == "enableStatistics":
suggest = "enable_statistics"
elif key == "encodingType":
suggest = "encoding_type"
elif key == "encryptionMode":
suggest = "encryption_mode"
elif key == "externalTableDefinition":
suggest = "external_table_definition"
elif key == "ignoreHeaderRows":
suggest = "ignore_header_rows"
elif key == "ignoreHeadersRow":
suggest = "ignore_headers_row"
elif key == "includeOpForFullLoad":
suggest = "include_op_for_full_load"
elif key == "maxFileSize":
suggest = "max_file_size"
elif key == "parquetTimestampInMillisecond":
suggest = "parquet_timestamp_in_millisecond"
elif key == "parquetVersion":
suggest = "parquet_version"
elif key == "preserveTransactions":
suggest = "preserve_transactions"
elif key == "rowGroupLength":
suggest = "row_group_length"
elif key == "serverSideEncryptionKmsKeyId":
suggest = "server_side_encryption_kms_key_id"
elif key == "serviceAccessRoleArn":
suggest = "service_access_role_arn"
elif key == "timestampColumnName":
suggest = "timestamp_column_name"
elif key == "useCsvNoSupValue":
suggest = "use_csv_no_sup_value"
elif key == "useTaskStartTimeForFullLoadTimestamp":
suggest = "use_task_start_time_for_full_load_timestamp"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointS3Settings. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointS3Settings.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointS3Settings.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
add_column_name: Optional[bool] = None,
bucket_folder: Optional[str] = None,
bucket_name: Optional[str] = None,
canned_acl_for_objects: Optional[str] = None,
cdc_inserts_and_updates: Optional[bool] = None,
cdc_inserts_only: Optional[bool] = None,
cdc_max_batch_interval: Optional[int] = None,
cdc_min_file_size: Optional[int] = None,
cdc_path: Optional[str] = None,
compression_type: Optional[str] = None,
csv_delimiter: Optional[str] = None,
csv_no_sup_value: Optional[str] = None,
csv_null_value: Optional[str] = None,
csv_row_delimiter: Optional[str] = None,
data_format: Optional[str] = None,
data_page_size: Optional[int] = None,
date_partition_delimiter: Optional[str] = None,
date_partition_enabled: Optional[bool] = None,
date_partition_sequence: Optional[str] = None,
dict_page_size_limit: Optional[int] = None,
enable_statistics: Optional[bool] = None,
encoding_type: Optional[str] = None,
encryption_mode: Optional[str] = None,
external_table_definition: Optional[str] = None,
ignore_header_rows: Optional[int] = None,
ignore_headers_row: Optional[int] = None,
include_op_for_full_load: Optional[bool] = None,
max_file_size: Optional[int] = None,
parquet_timestamp_in_millisecond: Optional[bool] = None,
parquet_version: Optional[str] = None,
preserve_transactions: Optional[bool] = None,
rfc4180: Optional[bool] = None,
row_group_length: Optional[int] = None,
server_side_encryption_kms_key_id: Optional[str] = None,
service_access_role_arn: Optional[str] = None,
timestamp_column_name: Optional[str] = None,
use_csv_no_sup_value: Optional[bool] = None,
use_task_start_time_for_full_load_timestamp: Optional[bool] = None):
"""
:param bool add_column_name: Whether to add column name information to the .csv output file. Default is `false`.
:param str bucket_folder: Custom S3 Bucket Object prefix for intermediate storage.
:param str bucket_name: Custom S3 Bucket name for intermediate storage.
:param str canned_acl_for_objects: Predefined (canned) access control list for objects created in an S3 bucket. Valid values include `none`, `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Default is `none`.
:param bool cdc_inserts_and_updates: Whether to write insert and update operations to .csv or .parquet output files. Default is `false`.
:param bool cdc_inserts_only: Whether to write insert operations to .csv or .parquet output files. Default is `false`.
:param int cdc_max_batch_interval: Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is `60`.
:param int cdc_min_file_size: Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is `32000`. **NOTE:** Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
:param str cdc_path: Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If `cdc_path` is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
:param str compression_type: Set to compress target files. Default is `NONE`. Valid values are `GZIP` and `NONE`.
:param str csv_delimiter: Delimiter used to separate columns in the source files. Default is `,`.
:param str csv_no_sup_value: String to use for all columns not included in the supplemental log.
:param str csv_null_value: String to use as the null value when writing to the target.
:param str csv_row_delimiter: Delimiter used to separate rows in the source files. Default is `\\n`.
:param str data_format: Output format for the files that AWS DMS uses to create S3 objects. Valid values are `csv` and `parquet`. Default is `csv`.
:param int data_page_size: Size of one data page in bytes. Default is `1048576` (1 MiB).
:param str date_partition_delimiter: Date separating delimiter to use during folder partitioning. Valid values are `SLASH`, `UNDERSCORE`, `DASH`, and `NONE`. Default is `SLASH`.
:param bool date_partition_enabled: Partition S3 bucket folders based on transaction commit dates. Default is `false`.
:param str date_partition_sequence: Date format to use during folder partitioning. Use this parameter when `date_partition_enabled` is set to true. Valid values are `YYYYMMDD`, `YYYYMMDDHH`, `YYYYMM`, `MMYYYYDD`, and `DDMMYYYY`. Default is `YYYYMMDD`.
:param int dict_page_size_limit: Maximum size in bytes of an encoded dictionary page of a column. Default is `1048576` (1 MiB).
:param bool enable_statistics: Whether to enable statistics for Parquet pages and row groups. Default is `true`.
:param str encoding_type: Type of encoding to use. Valid values are `rle_dictionary`, `plain`, and `plain_dictionary`. Default is `rle_dictionary`.
:param str encryption_mode: The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to `SSE_S3`. Valid values are `SSE_S3` and `SSE_KMS`.
:param str external_table_definition: JSON document that describes how AWS DMS should interpret the data.
:param int ignore_header_rows: When this value is set to `1`, DMS ignores the first row header in a .csv file. Default is `0`.
:param int ignore_headers_row: Deprecated. This setting has no effect. Will be removed in a future version.
:param bool include_op_for_full_load: Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is `false`.
:param int max_file_size: Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from `1` to `1048576`. Default is `1048576` (1 GB).
:param bool parquet_timestamp_in_millisecond: Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is `false`.
:param str parquet_version: Version of the .parquet file format. Default is `parquet-1-0`. Valid values are `parquet-1-0` and `parquet-2-0`.
:param bool preserve_transactions: Whether DMS saves the transaction order for a CDC load on the S3 target specified by `cdc_path`. Default is `false`.
:param bool rfc4180: For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is `true`.
:param int row_group_length: Number of rows in a row group. Default is `10000`.
:param str server_side_encryption_kms_key_id: ARN or Id of KMS Key to use when `encryption_mode` is `SSE_KMS`.
:param str service_access_role_arn: ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
:param str timestamp_column_name: Column to add with timestamp information to the endpoint data for an Amazon S3 target.
:param bool use_csv_no_sup_value: Whether to use `csv_no_sup_value` for columns not included in the supplemental log.
:param bool use_task_start_time_for_full_load_timestamp: When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is `false`.
"""
if add_column_name is not None:
pulumi.set(__self__, "add_column_name", add_column_name)
if bucket_folder is not None:
pulumi.set(__self__, "bucket_folder", bucket_folder)
if bucket_name is not None:
pulumi.set(__self__, "bucket_name", bucket_name)
if canned_acl_for_objects is not None:
pulumi.set(__self__, "canned_acl_for_objects", canned_acl_for_objects)
if cdc_inserts_and_updates is not None:
pulumi.set(__self__, "cdc_inserts_and_updates", cdc_inserts_and_updates)
if cdc_inserts_only is not None:
pulumi.set(__self__, "cdc_inserts_only", cdc_inserts_only)
if cdc_max_batch_interval is not None:
pulumi.set(__self__, "cdc_max_batch_interval", cdc_max_batch_interval)
if cdc_min_file_size is not None:
pulumi.set(__self__, "cdc_min_file_size", cdc_min_file_size)
if cdc_path is not None:
pulumi.set(__self__, "cdc_path", cdc_path)
if compression_type is not None:
pulumi.set(__self__, "compression_type", compression_type)
if csv_delimiter is not None:
pulumi.set(__self__, "csv_delimiter", csv_delimiter)
if csv_no_sup_value is not None:
pulumi.set(__self__, "csv_no_sup_value", csv_no_sup_value)
if csv_null_value is not None:
pulumi.set(__self__, "csv_null_value", csv_null_value)
if csv_row_delimiter is not None:
pulumi.set(__self__, "csv_row_delimiter", csv_row_delimiter)
if data_format is not None:
pulumi.set(__self__, "data_format", data_format)
if data_page_size is not None:
pulumi.set(__self__, "data_page_size", data_page_size)
if date_partition_delimiter is not None:
pulumi.set(__self__, "date_partition_delimiter", date_partition_delimiter)
if date_partition_enabled is not None:
pulumi.set(__self__, "date_partition_enabled", date_partition_enabled)
if date_partition_sequence is not None:
pulumi.set(__self__, "date_partition_sequence", date_partition_sequence)
if dict_page_size_limit is not None:
pulumi.set(__self__, "dict_page_size_limit", dict_page_size_limit)
if enable_statistics is not None:
pulumi.set(__self__, "enable_statistics", enable_statistics)
if encoding_type is not None:
pulumi.set(__self__, "encoding_type", encoding_type)
if encryption_mode is not None:
pulumi.set(__self__, "encryption_mode", encryption_mode)
if external_table_definition is not None:
pulumi.set(__self__, "external_table_definition", external_table_definition)
if ignore_header_rows is not None:
pulumi.set(__self__, "ignore_header_rows", ignore_header_rows)
if ignore_headers_row is not None:
pulumi.set(__self__, "ignore_headers_row", ignore_headers_row)
if include_op_for_full_load is not None:
pulumi.set(__self__, "include_op_for_full_load", include_op_for_full_load)
if max_file_size is not None:
pulumi.set(__self__, "max_file_size", max_file_size)
if parquet_timestamp_in_millisecond is not None:
pulumi.set(__self__, "parquet_timestamp_in_millisecond", parquet_timestamp_in_millisecond)
if parquet_version is not None:
pulumi.set(__self__, "parquet_version", parquet_version)
if preserve_transactions is not None:
pulumi.set(__self__, "preserve_transactions", preserve_transactions)
if rfc4180 is not None:
pulumi.set(__self__, "rfc4180", rfc4180)
if row_group_length is not None:
pulumi.set(__self__, "row_group_length", row_group_length)
if server_side_encryption_kms_key_id is not None:
pulumi.set(__self__, "server_side_encryption_kms_key_id", server_side_encryption_kms_key_id)
if service_access_role_arn is not None:
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
if timestamp_column_name is not None:
pulumi.set(__self__, "timestamp_column_name", timestamp_column_name)
if use_csv_no_sup_value is not None:
pulumi.set(__self__, "use_csv_no_sup_value", use_csv_no_sup_value)
if use_task_start_time_for_full_load_timestamp is not None:
pulumi.set(__self__, "use_task_start_time_for_full_load_timestamp", use_task_start_time_for_full_load_timestamp)
@property
@pulumi.getter(name="addColumnName")
def add_column_name(self) -> Optional[bool]:
"""
Whether to add column name information to the .csv output file. Default is `false`.
"""
return pulumi.get(self, "add_column_name")
@property
@pulumi.getter(name="bucketFolder")
def bucket_folder(self) -> Optional[str]:
"""
Custom S3 Bucket Object prefix for intermediate storage.
"""
return pulumi.get(self, "bucket_folder")
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> Optional[str]:
"""
Custom S3 Bucket name for intermediate storage.
"""
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter(name="cannedAclForObjects")
def canned_acl_for_objects(self) -> Optional[str]:
"""
Predefined (canned) access control list for objects created in an S3 bucket. Valid values include `none`, `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Default is `none`.
"""
return pulumi.get(self, "canned_acl_for_objects")
@property
@pulumi.getter(name="cdcInsertsAndUpdates")
def cdc_inserts_and_updates(self) -> Optional[bool]:
"""
Whether to write insert and update operations to .csv or .parquet output files. Default is `false`.
"""
return pulumi.get(self, "cdc_inserts_and_updates")
@property
@pulumi.getter(name="cdcInsertsOnly")
def cdc_inserts_only(self) -> Optional[bool]:
"""
Whether to write insert operations to .csv or .parquet output files. Default is `false`.
"""
return pulumi.get(self, "cdc_inserts_only")
@property
@pulumi.getter(name="cdcMaxBatchInterval")
def cdc_max_batch_interval(self) -> Optional[int]:
"""
Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is `60`.
"""
return pulumi.get(self, "cdc_max_batch_interval")
@property
@pulumi.getter(name="cdcMinFileSize")
def cdc_min_file_size(self) -> Optional[int]:
"""
Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is `32000`. **NOTE:** Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
"""
return pulumi.get(self, "cdc_min_file_size")
@property
@pulumi.getter(name="cdcPath")
def cdc_path(self) -> Optional[str]:
"""
Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If `cdc_path` is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
"""
return pulumi.get(self, "cdc_path")
@property
@pulumi.getter(name="compressionType")
def compression_type(self) -> Optional[str]:
"""
Set to compress target files. Default is `NONE`. Valid values are `GZIP` and `NONE`.
"""
return pulumi.get(self, "compression_type")
@property
@pulumi.getter(name="csvDelimiter")
def csv_delimiter(self) -> Optional[str]:
"""
Delimiter used to separate columns in the source files. Default is `,`.
"""
return pulumi.get(self, "csv_delimiter")
@property
@pulumi.getter(name="csvNoSupValue")
def csv_no_sup_value(self) -> Optional[str]:
"""
String to use for all columns not included in the supplemental log.
"""
return pulumi.get(self, "csv_no_sup_value")
@property
@pulumi.getter(name="csvNullValue")
def csv_null_value(self) -> Optional[str]:
"""
String to use as the null value when writing to the target.
"""
return pulumi.get(self, "csv_null_value")
@property
@pulumi.getter(name="csvRowDelimiter")
def csv_row_delimiter(self) -> Optional[str]:
"""
Delimiter used to separate rows in the source files. Default is `\\n`.
"""
return pulumi.get(self, "csv_row_delimiter")
@property
@pulumi.getter(name="dataFormat")
def data_format(self) -> Optional[str]:
"""
Output format for the files that AWS DMS uses to create S3 objects. Valid values are `csv` and `parquet`. Default is `csv`.
"""
return pulumi.get(self, "data_format")
@property
@pulumi.getter(name="dataPageSize")
def data_page_size(self) -> Optional[int]:
"""
Size of one data page in bytes. Default is `1048576` (1 MiB).
"""
return pulumi.get(self, "data_page_size")
@property
@pulumi.getter(name="datePartitionDelimiter")
def date_partition_delimiter(self) -> Optional[str]:
"""
Date separating delimiter to use during folder partitioning. Valid values are `SLASH`, `UNDERSCORE`, `DASH`, and `NONE`. Default is `SLASH`.
"""
return pulumi.get(self, "date_partition_delimiter")
@property
@pulumi.getter(name="datePartitionEnabled")
def date_partition_enabled(self) -> Optional[bool]:
"""
Partition S3 bucket folders based on transaction commit dates. Default is `false`.
"""
return pulumi.get(self, "date_partition_enabled")
@property
@pulumi.getter(name="datePartitionSequence")
def date_partition_sequence(self) -> Optional[str]:
"""
Date format to use during folder partitioning. Use this parameter when `date_partition_enabled` is set to true. Valid values are `YYYYMMDD`, `YYYYMMDDHH`, `YYYYMM`, `MMYYYYDD`, and `DDMMYYYY`. Default is `YYYYMMDD`.
"""
return pulumi.get(self, "date_partition_sequence")
@property
@pulumi.getter(name="dictPageSizeLimit")
def dict_page_size_limit(self) -> Optional[int]:
"""
Maximum size in bytes of an encoded dictionary page of a column. Default is `1048576` (1 MiB).
"""
return pulumi.get(self, "dict_page_size_limit")
@property
@pulumi.getter(name="enableStatistics")
def enable_statistics(self) -> Optional[bool]:
"""
Whether to enable statistics for Parquet pages and row groups. Default is `true`.
"""
return pulumi.get(self, "enable_statistics")
@property
@pulumi.getter(name="encodingType")
def encoding_type(self) -> Optional[str]:
"""
Type of encoding to use. Valid values are `rle_dictionary`, `plain`, and `plain_dictionary`. Default is `rle_dictionary`.
"""
return pulumi.get(self, "encoding_type")
@property
@pulumi.getter(name="encryptionMode")
def encryption_mode(self) -> Optional[str]:
"""
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to `SSE_S3`. Valid values are `SSE_S3` and `SSE_KMS`.
"""
return pulumi.get(self, "encryption_mode")
@property
@pulumi.getter(name="externalTableDefinition")
def external_table_definition(self) -> Optional[str]:
"""
JSON document that describes how AWS DMS should interpret the data.
"""
return pulumi.get(self, "external_table_definition")
@property
@pulumi.getter(name="ignoreHeaderRows")
def ignore_header_rows(self) -> Optional[int]:
"""
When this value is set to `1`, DMS ignores the first row header in a .csv file. Default is `0`.
"""
return pulumi.get(self, "ignore_header_rows")
@property
@pulumi.getter(name="ignoreHeadersRow")
def ignore_headers_row(self) -> Optional[int]:
"""
Deprecated. This setting has no effect. Will be removed in a future version.
"""
return pulumi.get(self, "ignore_headers_row")
@property
@pulumi.getter(name="includeOpForFullLoad")
def include_op_for_full_load(self) -> Optional[bool]:
"""
Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is `false`.
"""
return pulumi.get(self, "include_op_for_full_load")
@property
@pulumi.getter(name="maxFileSize")
def max_file_size(self) -> Optional[int]:
"""
Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from `1` to `1048576`. Default is `1048576` (1 GB).
"""
return pulumi.get(self, "max_file_size")
@property
@pulumi.getter(name="parquetTimestampInMillisecond")
def parquet_timestamp_in_millisecond(self) -> Optional[bool]:
"""
Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is `false`.
"""
return pulumi.get(self, "parquet_timestamp_in_millisecond")
@property
@pulumi.getter(name="parquetVersion")
def parquet_version(self) -> Optional[str]:
"""
Version of the .parquet file format. Default is `parquet-1-0`. Valid values are `parquet-1-0` and `parquet-2-0`.
"""
return pulumi.get(self, "parquet_version")
@property
@pulumi.getter(name="preserveTransactions")
def preserve_transactions(self) -> Optional[bool]:
"""
Whether DMS saves the transaction order for a CDC load on the S3 target specified by `cdc_path`. Default is `false`.
"""
return pulumi.get(self, "preserve_transactions")
@property
@pulumi.getter
def rfc4180(self) -> Optional[bool]:
"""
For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is `true`.
"""
return pulumi.get(self, "rfc4180")
@property
@pulumi.getter(name="rowGroupLength")
def row_group_length(self) -> Optional[int]:
"""
Number of rows in a row group. Default is `10000`.
"""
return pulumi.get(self, "row_group_length")
@property
@pulumi.getter(name="serverSideEncryptionKmsKeyId")
def server_side_encryption_kms_key_id(self) -> Optional[str]:
"""
ARN or Id of KMS Key to use when `encryption_mode` is `SSE_KMS`.
"""
return pulumi.get(self, "server_side_encryption_kms_key_id")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> Optional[str]:
"""
ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
"""
return pulumi.get(self, "service_access_role_arn")
@property
@pulumi.getter(name="timestampColumnName")
def timestamp_column_name(self) -> Optional[str]:
"""
Column to add with timestamp information to the endpoint data for an Amazon S3 target.
"""
return pulumi.get(self, "timestamp_column_name")
@property
@pulumi.getter(name="useCsvNoSupValue")
def use_csv_no_sup_value(self) -> Optional[bool]:
"""
Whether to use `csv_no_sup_value` for columns not included in the supplemental log.
"""
return pulumi.get(self, "use_csv_no_sup_value")
@property
@pulumi.getter(name="useTaskStartTimeForFullLoadTimestamp")
def use_task_start_time_for_full_load_timestamp(self) -> Optional[bool]:
"""
When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is `false`.
"""
return pulumi.get(self, "use_task_start_time_for_full_load_timestamp")
@pulumi.output_type
class GetEndpointElasticsearchSettingResult(dict):
def __init__(__self__, *,
endpoint_uri: str,
error_retry_duration: int,
full_load_error_percentage: int,
service_access_role_arn: str):
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
pulumi.set(__self__, "error_retry_duration", error_retry_duration)
pulumi.set(__self__, "full_load_error_percentage", full_load_error_percentage)
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> str:
return pulumi.get(self, "endpoint_uri")
@property
@pulumi.getter(name="errorRetryDuration")
def error_retry_duration(self) -> int:
return pulumi.get(self, "error_retry_duration")
@property
@pulumi.getter(name="fullLoadErrorPercentage")
def full_load_error_percentage(self) -> int:
return pulumi.get(self, "full_load_error_percentage")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> str:
return pulumi.get(self, "service_access_role_arn")
@pulumi.output_type
class GetEndpointKafkaSettingResult(dict):
def __init__(__self__, *,
broker: str,
include_control_details: bool,
include_null_and_empty: bool,
include_partition_value: bool,
include_table_alter_operations: bool,
include_transaction_details: bool,
message_format: str,
message_max_bytes: int,
no_hex_prefix: bool,
partition_include_schema_table: bool,
sasl_password: str,
sasl_username: str,
security_protocol: str,
ssl_ca_certificate_arn: str,
ssl_client_certificate_arn: str,
ssl_client_key_arn: str,
ssl_client_key_password: str,
topic: str):
pulumi.set(__self__, "broker", broker)
pulumi.set(__self__, "include_control_details", include_control_details)
pulumi.set(__self__, "include_null_and_empty", include_null_and_empty)
pulumi.set(__self__, "include_partition_value", include_partition_value)
pulumi.set(__self__, "include_table_alter_operations", include_table_alter_operations)
pulumi.set(__self__, "include_transaction_details", include_transaction_details)
pulumi.set(__self__, "message_format", message_format)
pulumi.set(__self__, "message_max_bytes", message_max_bytes)
pulumi.set(__self__, "no_hex_prefix", no_hex_prefix)
pulumi.set(__self__, "partition_include_schema_table", partition_include_schema_table)
pulumi.set(__self__, "sasl_password", sasl_password)
pulumi.set(__self__, "sasl_username", sasl_username)
pulumi.set(__self__, "security_protocol", security_protocol)
pulumi.set(__self__, "ssl_ca_certificate_arn", ssl_ca_certificate_arn)
pulumi.set(__self__, "ssl_client_certificate_arn", ssl_client_certificate_arn)
pulumi.set(__self__, "ssl_client_key_arn", ssl_client_key_arn)
pulumi.set(__self__, "ssl_client_key_password", ssl_client_key_password)
pulumi.set(__self__, "topic", topic)
@property
@pulumi.getter
def broker(self) -> str:
return pulumi.get(self, "broker")
@property
@pulumi.getter(name="includeControlDetails")
def include_control_details(self) -> bool:
return pulumi.get(self, "include_control_details")
@property
@pulumi.getter(name="includeNullAndEmpty")
def include_null_and_empty(self) -> bool:
return pulumi.get(self, "include_null_and_empty")
@property
@pulumi.getter(name="includePartitionValue")
def include_partition_value(self) -> bool:
return pulumi.get(self, "include_partition_value")
@property
@pulumi.getter(name="includeTableAlterOperations")
def include_table_alter_operations(self) -> bool:
return pulumi.get(self, "include_table_alter_operations")
@property
@pulumi.getter(name="includeTransactionDetails")
def include_transaction_details(self) -> bool:
return pulumi.get(self, "include_transaction_details")
@property
@pulumi.getter(name="messageFormat")
def message_format(self) -> str:
return pulumi.get(self, "message_format")
@property
@pulumi.getter(name="messageMaxBytes")
def message_max_bytes(self) -> int:
return pulumi.get(self, "message_max_bytes")
@property
@pulumi.getter(name="noHexPrefix")
def no_hex_prefix(self) -> bool:
return pulumi.get(self, "no_hex_prefix")
@property
@pulumi.getter(name="partitionIncludeSchemaTable")
def partition_include_schema_table(self) -> bool:
return pulumi.get(self, "partition_include_schema_table")
@property
@pulumi.getter(name="saslPassword")
def sasl_password(self) -> str:
return pulumi.get(self, "sasl_password")
@property
@pulumi.getter(name="saslUsername")
def sasl_username(self) -> str:
return pulumi.get(self, "sasl_username")
@property
@pulumi.getter(name="securityProtocol")
def security_protocol(self) -> str:
return pulumi.get(self, "security_protocol")
@property
@pulumi.getter(name="sslCaCertificateArn")
def ssl_ca_certificate_arn(self) -> str:
return pulumi.get(self, "ssl_ca_certificate_arn")
@property
@pulumi.getter(name="sslClientCertificateArn")
def ssl_client_certificate_arn(self) -> str:
return pulumi.get(self, "ssl_client_certificate_arn")
@property
@pulumi.getter(name="sslClientKeyArn")
def ssl_client_key_arn(self) -> str:
return pulumi.get(self, "ssl_client_key_arn")
@property
@pulumi.getter(name="sslClientKeyPassword")
def ssl_client_key_password(self) -> str:
return pulumi.get(self, "ssl_client_key_password")
@property
@pulumi.getter
def topic(self) -> str:
return pulumi.get(self, "topic")
@pulumi.output_type
class GetEndpointKinesisSettingResult(dict):
def __init__(__self__, *,
include_control_details: bool,
include_null_and_empty: bool,
include_partition_value: bool,
include_table_alter_operations: bool,
include_transaction_details: bool,
message_format: str,
partition_include_schema_table: bool,
service_access_role_arn: str,
stream_arn: str):
pulumi.set(__self__, "include_control_details", include_control_details)
pulumi.set(__self__, "include_null_and_empty", include_null_and_empty)
pulumi.set(__self__, "include_partition_value", include_partition_value)
pulumi.set(__self__, "include_table_alter_operations", include_table_alter_operations)
pulumi.set(__self__, "include_transaction_details", include_transaction_details)
pulumi.set(__self__, "message_format", message_format)
pulumi.set(__self__, "partition_include_schema_table", partition_include_schema_table)
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
pulumi.set(__self__, "stream_arn", stream_arn)
@property
@pulumi.getter(name="includeControlDetails")
def include_control_details(self) -> bool:
return pulumi.get(self, "include_control_details")
@property
@pulumi.getter(name="includeNullAndEmpty")
def include_null_and_empty(self) -> bool:
return pulumi.get(self, "include_null_and_empty")
@property
@pulumi.getter(name="includePartitionValue")
def include_partition_value(self) -> bool:
return pulumi.get(self, "include_partition_value")
@property
@pulumi.getter(name="includeTableAlterOperations")
def include_table_alter_operations(self) -> bool:
return pulumi.get(self, "include_table_alter_operations")
@property
@pulumi.getter(name="includeTransactionDetails")
def include_transaction_details(self) -> bool:
return pulumi.get(self, "include_transaction_details")
@property
@pulumi.getter(name="messageFormat")
def message_format(self) -> str:
return pulumi.get(self, "message_format")
@property
@pulumi.getter(name="partitionIncludeSchemaTable")
def partition_include_schema_table(self) -> bool:
return pulumi.get(self, "partition_include_schema_table")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> str:
return pulumi.get(self, "service_access_role_arn")
@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> str:
return pulumi.get(self, "stream_arn")
@pulumi.output_type
class GetEndpointMongodbSettingResult(dict):
def __init__(__self__, *,
auth_mechanism: str,
auth_source: str,
auth_type: str,
docs_to_investigate: str,
extract_doc_id: str,
nesting_level: str):
pulumi.set(__self__, "auth_mechanism", auth_mechanism)
pulumi.set(__self__, "auth_source", auth_source)
pulumi.set(__self__, "auth_type", auth_type)
pulumi.set(__self__, "docs_to_investigate", docs_to_investigate)
pulumi.set(__self__, "extract_doc_id", extract_doc_id)
pulumi.set(__self__, "nesting_level", nesting_level)
@property
@pulumi.getter(name="authMechanism")
def auth_mechanism(self) -> str:
return pulumi.get(self, "auth_mechanism")
@property
@pulumi.getter(name="authSource")
def auth_source(self) -> str:
return pulumi.get(self, "auth_source")
@property
@pulumi.getter(name="authType")
def auth_type(self) -> str:
return pulumi.get(self, "auth_type")
@property
@pulumi.getter(name="docsToInvestigate")
def docs_to_investigate(self) -> str:
return pulumi.get(self, "docs_to_investigate")
@property
@pulumi.getter(name="extractDocId")
def extract_doc_id(self) -> str:
return pulumi.get(self, "extract_doc_id")
@property
@pulumi.getter(name="nestingLevel")
def nesting_level(self) -> str:
return pulumi.get(self, "nesting_level")
@pulumi.output_type
class GetEndpointRedisSettingResult(dict):
def __init__(__self__, *,
auth_password: str,
auth_type: str,
auth_user_name: str,
port: int,
server_name: str,
ssl_ca_certificate_arn: str,
ssl_security_protocol: str):
pulumi.set(__self__, "auth_password", auth_password)
pulumi.set(__self__, "auth_type", auth_type)
pulumi.set(__self__, "auth_user_name", auth_user_name)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "ssl_ca_certificate_arn", ssl_ca_certificate_arn)
pulumi.set(__self__, "ssl_security_protocol", ssl_security_protocol)
@property
@pulumi.getter(name="authPassword")
def auth_password(self) -> str:
return pulumi.get(self, "auth_password")
@property
@pulumi.getter(name="authType")
def auth_type(self) -> str:
return pulumi.get(self, "auth_type")
@property
@pulumi.getter(name="authUserName")
def auth_user_name(self) -> str:
return pulumi.get(self, "auth_user_name")
@property
@pulumi.getter
def port(self) -> int:
return pulumi.get(self, "port")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> str:
return pulumi.get(self, "server_name")
@property
@pulumi.getter(name="sslCaCertificateArn")
def ssl_ca_certificate_arn(self) -> str:
return pulumi.get(self, "ssl_ca_certificate_arn")
@property
@pulumi.getter(name="sslSecurityProtocol")
def ssl_security_protocol(self) -> str:
return pulumi.get(self, "ssl_security_protocol")
@pulumi.output_type
class GetEndpointRedshiftSettingResult(dict):
def __init__(__self__, *,
bucket_folder: str,
bucket_name: str,
encryption_mode: str,
server_side_encryption_kms_key_id: str,
service_access_role_arn: str):
pulumi.set(__self__, "bucket_folder", bucket_folder)
pulumi.set(__self__, "bucket_name", bucket_name)
pulumi.set(__self__, "encryption_mode", encryption_mode)
pulumi.set(__self__, "server_side_encryption_kms_key_id", server_side_encryption_kms_key_id)
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
@property
@pulumi.getter(name="bucketFolder")
def bucket_folder(self) -> str:
return pulumi.get(self, "bucket_folder")
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> str:
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter(name="encryptionMode")
def encryption_mode(self) -> str:
return pulumi.get(self, "encryption_mode")
@property
@pulumi.getter(name="serverSideEncryptionKmsKeyId")
def server_side_encryption_kms_key_id(self) -> str:
return pulumi.get(self, "server_side_encryption_kms_key_id")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> str:
return pulumi.get(self, "service_access_role_arn")
@pulumi.output_type
class GetEndpointS3SettingResult(dict):
def __init__(__self__, *,
add_column_name: bool,
bucket_folder: str,
bucket_name: str,
canned_acl_for_objects: str,
cdc_inserts_and_updates: bool,
cdc_inserts_only: bool,
cdc_max_batch_interval: int,
cdc_min_file_size: int,
cdc_path: str,
compression_type: str,
csv_delimiter: str,
csv_no_sup_value: str,
csv_null_value: str,
csv_row_delimiter: str,
data_format: str,
data_page_size: int,
date_partition_delimiter: str,
date_partition_enabled: bool,
date_partition_sequence: str,
dict_page_size_limit: int,
enable_statistics: bool,
encoding_type: str,
encryption_mode: str,
external_table_definition: str,
ignore_header_rows: int,
ignore_headers_row: int,
include_op_for_full_load: bool,
max_file_size: int,
parquet_timestamp_in_millisecond: bool,
parquet_version: str,
preserve_transactions: bool,
rfc4180: bool,
row_group_length: int,
server_side_encryption_kms_key_id: str,
service_access_role_arn: str,
timestamp_column_name: str,
use_csv_no_sup_value: bool,
use_task_start_time_for_full_load_timestamp: bool):
pulumi.set(__self__, "add_column_name", add_column_name)
pulumi.set(__self__, "bucket_folder", bucket_folder)
pulumi.set(__self__, "bucket_name", bucket_name)
pulumi.set(__self__, "canned_acl_for_objects", canned_acl_for_objects)
pulumi.set(__self__, "cdc_inserts_and_updates", cdc_inserts_and_updates)
pulumi.set(__self__, "cdc_inserts_only", cdc_inserts_only)
pulumi.set(__self__, "cdc_max_batch_interval", cdc_max_batch_interval)
pulumi.set(__self__, "cdc_min_file_size", cdc_min_file_size)
pulumi.set(__self__, "cdc_path", cdc_path)
pulumi.set(__self__, "compression_type", compression_type)
pulumi.set(__self__, "csv_delimiter", csv_delimiter)
pulumi.set(__self__, "csv_no_sup_value", csv_no_sup_value)
pulumi.set(__self__, "csv_null_value", csv_null_value)
pulumi.set(__self__, "csv_row_delimiter", csv_row_delimiter)
pulumi.set(__self__, "data_format", data_format)
pulumi.set(__self__, "data_page_size", data_page_size)
pulumi.set(__self__, "date_partition_delimiter", date_partition_delimiter)
pulumi.set(__self__, "date_partition_enabled", date_partition_enabled)
pulumi.set(__self__, "date_partition_sequence", date_partition_sequence)
pulumi.set(__self__, "dict_page_size_limit", dict_page_size_limit)
pulumi.set(__self__, "enable_statistics", enable_statistics)
pulumi.set(__self__, "encoding_type", encoding_type)
pulumi.set(__self__, "encryption_mode", encryption_mode)
pulumi.set(__self__, "external_table_definition", external_table_definition)
pulumi.set(__self__, "ignore_header_rows", ignore_header_rows)
pulumi.set(__self__, "ignore_headers_row", ignore_headers_row)
pulumi.set(__self__, "include_op_for_full_load", include_op_for_full_load)
pulumi.set(__self__, "max_file_size", max_file_size)
pulumi.set(__self__, "parquet_timestamp_in_millisecond", parquet_timestamp_in_millisecond)
pulumi.set(__self__, "parquet_version", parquet_version)
pulumi.set(__self__, "preserve_transactions", preserve_transactions)
pulumi.set(__self__, "rfc4180", rfc4180)
pulumi.set(__self__, "row_group_length", row_group_length)
pulumi.set(__self__, "server_side_encryption_kms_key_id", server_side_encryption_kms_key_id)
pulumi.set(__self__, "service_access_role_arn", service_access_role_arn)
pulumi.set(__self__, "timestamp_column_name", timestamp_column_name)
pulumi.set(__self__, "use_csv_no_sup_value", use_csv_no_sup_value)
pulumi.set(__self__, "use_task_start_time_for_full_load_timestamp", use_task_start_time_for_full_load_timestamp)
@property
@pulumi.getter(name="addColumnName")
def add_column_name(self) -> bool:
return pulumi.get(self, "add_column_name")
@property
@pulumi.getter(name="bucketFolder")
def bucket_folder(self) -> str:
return pulumi.get(self, "bucket_folder")
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> str:
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter(name="cannedAclForObjects")
def canned_acl_for_objects(self) -> str:
return pulumi.get(self, "canned_acl_for_objects")
@property
@pulumi.getter(name="cdcInsertsAndUpdates")
def cdc_inserts_and_updates(self) -> bool:
return pulumi.get(self, "cdc_inserts_and_updates")
@property
@pulumi.getter(name="cdcInsertsOnly")
def cdc_inserts_only(self) -> bool:
return pulumi.get(self, "cdc_inserts_only")
@property
@pulumi.getter(name="cdcMaxBatchInterval")
def cdc_max_batch_interval(self) -> int:
return pulumi.get(self, "cdc_max_batch_interval")
@property
@pulumi.getter(name="cdcMinFileSize")
def cdc_min_file_size(self) -> int:
return pulumi.get(self, "cdc_min_file_size")
@property
@pulumi.getter(name="cdcPath")
def cdc_path(self) -> str:
return pulumi.get(self, "cdc_path")
@property
@pulumi.getter(name="compressionType")
def compression_type(self) -> str:
return pulumi.get(self, "compression_type")
@property
@pulumi.getter(name="csvDelimiter")
def csv_delimiter(self) -> str:
return pulumi.get(self, "csv_delimiter")
@property
@pulumi.getter(name="csvNoSupValue")
def csv_no_sup_value(self) -> str:
return pulumi.get(self, "csv_no_sup_value")
@property
@pulumi.getter(name="csvNullValue")
def csv_null_value(self) -> str:
return pulumi.get(self, "csv_null_value")
@property
@pulumi.getter(name="csvRowDelimiter")
def csv_row_delimiter(self) -> str:
return pulumi.get(self, "csv_row_delimiter")
@property
@pulumi.getter(name="dataFormat")
def data_format(self) -> str:
return pulumi.get(self, "data_format")
@property
@pulumi.getter(name="dataPageSize")
def data_page_size(self) -> int:
return pulumi.get(self, "data_page_size")
@property
@pulumi.getter(name="datePartitionDelimiter")
def date_partition_delimiter(self) -> str:
return pulumi.get(self, "date_partition_delimiter")
@property
@pulumi.getter(name="datePartitionEnabled")
def date_partition_enabled(self) -> bool:
return pulumi.get(self, "date_partition_enabled")
@property
@pulumi.getter(name="datePartitionSequence")
def date_partition_sequence(self) -> str:
return pulumi.get(self, "date_partition_sequence")
@property
@pulumi.getter(name="dictPageSizeLimit")
def dict_page_size_limit(self) -> int:
return pulumi.get(self, "dict_page_size_limit")
@property
@pulumi.getter(name="enableStatistics")
def enable_statistics(self) -> bool:
return pulumi.get(self, "enable_statistics")
@property
@pulumi.getter(name="encodingType")
def encoding_type(self) -> str:
return pulumi.get(self, "encoding_type")
@property
@pulumi.getter(name="encryptionMode")
def encryption_mode(self) -> str:
return pulumi.get(self, "encryption_mode")
@property
@pulumi.getter(name="externalTableDefinition")
def external_table_definition(self) -> str:
return pulumi.get(self, "external_table_definition")
@property
@pulumi.getter(name="ignoreHeaderRows")
def ignore_header_rows(self) -> int:
return pulumi.get(self, "ignore_header_rows")
@property
@pulumi.getter(name="ignoreHeadersRow")
def ignore_headers_row(self) -> int:
return pulumi.get(self, "ignore_headers_row")
@property
@pulumi.getter(name="includeOpForFullLoad")
def include_op_for_full_load(self) -> bool:
return pulumi.get(self, "include_op_for_full_load")
@property
@pulumi.getter(name="maxFileSize")
def max_file_size(self) -> int:
return pulumi.get(self, "max_file_size")
@property
@pulumi.getter(name="parquetTimestampInMillisecond")
def parquet_timestamp_in_millisecond(self) -> bool:
return pulumi.get(self, "parquet_timestamp_in_millisecond")
@property
@pulumi.getter(name="parquetVersion")
def parquet_version(self) -> str:
return pulumi.get(self, "parquet_version")
@property
@pulumi.getter(name="preserveTransactions")
def preserve_transactions(self) -> bool:
return pulumi.get(self, "preserve_transactions")
@property
@pulumi.getter
def rfc4180(self) -> bool:
return pulumi.get(self, "rfc4180")
@property
@pulumi.getter(name="rowGroupLength")
def row_group_length(self) -> int:
return pulumi.get(self, "row_group_length")
@property
@pulumi.getter(name="serverSideEncryptionKmsKeyId")
def server_side_encryption_kms_key_id(self) -> str:
return pulumi.get(self, "server_side_encryption_kms_key_id")
@property
@pulumi.getter(name="serviceAccessRoleArn")
def service_access_role_arn(self) -> str:
return pulumi.get(self, "service_access_role_arn")
@property
@pulumi.getter(name="timestampColumnName")
def timestamp_column_name(self) -> str:
return pulumi.get(self, "timestamp_column_name")
@property
@pulumi.getter(name="useCsvNoSupValue")
def use_csv_no_sup_value(self) -> bool:
return pulumi.get(self, "use_csv_no_sup_value")
@property
@pulumi.getter(name="useTaskStartTimeForFullLoadTimestamp")
def use_task_start_time_for_full_load_timestamp(self) -> bool:
return pulumi.get(self, "use_task_start_time_for_full_load_timestamp")
|
95fef38b2c75336cf3d5837c3ca42861b7c53f32
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/procedures/definitions/win32_api-ms-win-gaming-tcui-l1-1-1.py
|
0965c4c7d5745fe6569b5a337997cfabf2ff9dee
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
win32_api-ms-win-gaming-tcui-l1-1-1.py
|
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("api-ms-win-gaming-tcui-l1-1-1.dll")
prototypes = \
{
#
'CheckGamingPrivilegeWithUI': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["returnCode", "context"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["privilegeId", "scope", "policy", "friendlyMessage", "completionRoutine", "context"]),
#
'CheckGamingPrivilegeSilently': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["privilegeId", "scope", "policy", "hasPrivilege"]),
}
lib.set_prototypes(prototypes)
|
2a5dc86a90bd53d5f7ba837e55e3f80078e0874c
|
b19529491b2db47f2b69d39fba16a86bee2353bc
|
/examples/movie_details.py
|
6a235de2e4660955dd1f7f2a512f521dd4272ef7
|
[] |
no_license
|
AnthonyBloomer/tmdbv3api
|
c4f7a7db5eee87ca50e0467d7b73b4e54d5c5731
|
c5e96af23494b6b5506e84ed3b0be626ee25cb26
|
refs/heads/master
| 2023-08-24T08:23:16.476455
| 2023-08-14T11:36:43
| 2023-08-14T11:36:43
| 48,133,546
| 208
| 70
| null | 2023-01-04T08:37:59
| 2015-12-16T20:30:24
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
movie_details.py
|
from tmdbv3api import TMDb, Movie
tmdb = TMDb()
tmdb.api_key = ""
movie = Movie()
m = movie.details(111)
print(m.title)
print(m.overview)
print(m.popularity)
|
fcedb8983947b43de4a9a8409000365fe37bc992
|
cf140b80ae581fde086820bd4efc0e3594111864
|
/gql/dsl.py
|
adc48bea549f197da64ce00a92f9dca3b4b70c25
|
[
"MIT"
] |
permissive
|
graphql-python/gql
|
2f059acda879272de06ea18ecc59605524f1c2cc
|
013fa6aceb066be843849e83cc72747f51c07d0e
|
refs/heads/master
| 2023-08-29T19:42:37.812918
| 2023-07-26T10:06:47
| 2023-07-26T10:06:47
| 62,077,169
| 1,378
| 211
|
MIT
| 2023-09-13T20:43:11
| 2016-06-27T18:08:42
|
Python
|
UTF-8
|
Python
| false
| false
| 36,033
|
py
|
dsl.py
|
"""
.. image:: http://www.plantuml.com/plantuml/png/ZLAzJWCn3Dxz51vXw1im50ag8L4XwC1OkLTJ8gMvAd4GwEYxGuC8pTbKtUxy_TZEvsaIYfAt7e1MII9rWfsdbF1cSRzWpvtq4GT0JENduX8GXr_g7brQlf5tw-MBOx_-HlS0LV_Kzp8xr1kZav9PfCsMWvolEA_1VylHoZCExKwKv4Tg2s_VkSkca2kof2JDb0yxZYIk3qMZYUe1B1uUZOROXn96pQMugEMUdRnUUqUf6DBXQyIz2zu5RlgUQAFVNYaeRfBI79_JrUTaeg9JZFQj5MmUc69PDmNGE2iU61fDgfri3x36gxHw3gDHD6xqqQ7P4vjKqz2-602xtkO7uo17SCLhVSv25VjRjUAFcUE73Sspb8ADBl8gTT7j2cFAOPst_Wi0 # noqa
:alt: UML diagram
"""
import logging
import re
from abc import ABC, abstractmethod
from math import isfinite
from typing import Any, Dict, Iterable, Mapping, Optional, Tuple, Union, cast
from graphql import (
ArgumentNode,
BooleanValueNode,
DocumentNode,
EnumValueNode,
FieldNode,
FloatValueNode,
FragmentDefinitionNode,
FragmentSpreadNode,
GraphQLArgument,
GraphQLEnumType,
GraphQLError,
GraphQLField,
GraphQLID,
GraphQLInputObjectType,
GraphQLInputType,
GraphQLInterfaceType,
GraphQLList,
GraphQLNamedType,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
InlineFragmentNode,
IntValueNode,
ListTypeNode,
ListValueNode,
NamedTypeNode,
NameNode,
NonNullTypeNode,
NullValueNode,
ObjectFieldNode,
ObjectValueNode,
OperationDefinitionNode,
OperationType,
SelectionSetNode,
StringValueNode,
TypeNode,
Undefined,
ValueNode,
VariableDefinitionNode,
VariableNode,
get_named_type,
introspection_types,
is_enum_type,
is_input_object_type,
is_leaf_type,
is_list_type,
is_non_null_type,
is_wrapping_type,
print_ast,
)
from graphql.pyutils import inspect
from .utils import to_camel_case
log = logging.getLogger(__name__)
_re_integer_string = re.compile("^-?(?:0|[1-9][0-9]*)$")
def ast_from_serialized_value_untyped(serialized: Any) -> Optional[ValueNode]:
"""Given a serialized value, try our best to produce an AST.
    Anything resembling an object (an instance of Mapping) will be converted
    to an ObjectValueNode made of ObjectFieldNodes.
    Anything resembling a list (an instance of Iterable, except str)
    will be converted to a ListValueNode.
In some cases, a custom scalar can be serialized differently in the query
than in the variables. In that case, this function will not work."""
if serialized is None or serialized is Undefined:
return NullValueNode()
if isinstance(serialized, Mapping):
field_items = (
(key, ast_from_serialized_value_untyped(value))
for key, value in serialized.items()
)
field_nodes = tuple(
ObjectFieldNode(name=NameNode(value=field_name), value=field_value)
for field_name, field_value in field_items
if field_value
)
return ObjectValueNode(fields=field_nodes)
if isinstance(serialized, Iterable) and not isinstance(serialized, str):
maybe_nodes = (ast_from_serialized_value_untyped(item) for item in serialized)
nodes = tuple(node for node in maybe_nodes if node)
return ListValueNode(values=nodes)
if isinstance(serialized, bool):
return BooleanValueNode(value=serialized)
if isinstance(serialized, int):
return IntValueNode(value=str(serialized))
if isinstance(serialized, float) and isfinite(serialized):
value = str(serialized)
if value.endswith(".0"):
value = value[:-2]
return FloatValueNode(value=value)
if isinstance(serialized, str):
return StringValueNode(value=serialized)
raise TypeError(f"Cannot convert value to AST: {inspect(serialized)}.")
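# Illustrative sketch (added comment, not part of the original module): given a
# dict produced by a custom scalar serialization, the helper above would map it
# roughly as follows (NameNode wrappers omitted for brevity):
#   ast_from_serialized_value_untyped({"lat": 1.5, "tags": ["a", "b"]})
#   -> ObjectValueNode(fields=(ObjectFieldNode("lat", FloatValueNode("1.5")),
#                              ObjectFieldNode("tags", ListValueNode(...))))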
def ast_from_value(value: Any, type_: GraphQLInputType) -> Optional[ValueNode]:
"""
This is a partial copy paste of the ast_from_value function in
graphql-core utilities/ast_from_value.py
    It overrides the if blocks that use recursion and adds a new case to return a
    VariableNode when the value is a DSLVariable.
    Produce a GraphQL Value AST given a Python object.
    Raises a GraphQLError instead of returning None if we receive an Undefined
    value or if we receive a Null value for a Non-Null type.
"""
if isinstance(value, DSLVariable):
return value.set_type(type_).ast_variable_name
if is_non_null_type(type_):
type_ = cast(GraphQLNonNull, type_)
inner_type = type_.of_type
ast_value = ast_from_value(value, inner_type)
if isinstance(ast_value, NullValueNode):
raise GraphQLError(
"Received Null value for a Non-Null type " f"{inspect(inner_type)}."
)
return ast_value
# only explicit None, not Undefined or NaN
if value is None:
return NullValueNode()
# undefined
if value is Undefined:
raise GraphQLError(f"Received Undefined value for type {inspect(type_)}.")
# Convert Python list to GraphQL list. If the GraphQLType is a list, but the value
# is not a list, convert the value using the list's item type.
if is_list_type(type_):
type_ = cast(GraphQLList, type_)
item_type = type_.of_type
if isinstance(value, Iterable) and not isinstance(value, str):
maybe_value_nodes = (ast_from_value(item, item_type) for item in value)
value_nodes = tuple(node for node in maybe_value_nodes if node)
return ListValueNode(values=value_nodes)
return ast_from_value(value, item_type)
# Populate the fields of the input object by creating ASTs from each value in the
# Python dict according to the fields in the input type.
if is_input_object_type(type_):
if value is None or not isinstance(value, Mapping):
return None
type_ = cast(GraphQLInputObjectType, type_)
field_items = (
(field_name, ast_from_value(value[field_name], field.type))
for field_name, field in type_.fields.items()
if field_name in value
)
field_nodes = tuple(
ObjectFieldNode(name=NameNode(value=field_name), value=field_value)
for field_name, field_value in field_items
if field_value
)
return ObjectValueNode(fields=field_nodes)
if is_leaf_type(type_):
# Since value is an internally represented value, it must be serialized to an
# externally represented value before converting into an AST.
serialized = type_.serialize(value) # type: ignore
# if the serialized value is a string, then we should use the
# type to determine if it is an enum, an ID or a normal string
if isinstance(serialized, str):
# Enum types use Enum literals.
if is_enum_type(type_):
return EnumValueNode(value=serialized)
# ID types can use Int literals.
if type_ is GraphQLID and _re_integer_string.match(serialized):
return IntValueNode(value=serialized)
return StringValueNode(value=serialized)
# Some custom scalars will serialize to dicts or lists
# Providing here a default conversion to AST using our best judgment
# until graphql-js issue #1817 is solved
# https://github.com/graphql/graphql-js/issues/1817
return ast_from_serialized_value_untyped(serialized)
# Not reachable. All possible input types have been considered.
raise TypeError(f"Unexpected input type: {inspect(type_)}.")
def dsl_gql(
*operations: "DSLExecutable", **operations_with_name: "DSLExecutable"
) -> DocumentNode:
r"""Given arguments instances of :class:`DSLExecutable`
containing GraphQL operations or fragments,
generate a Document which can be executed later in a
gql client or a gql session.
Similar to the :func:`gql.gql` function but instead of parsing a python
string to describe the request, we are using operations which have been generated
dynamically using instances of :class:`DSLField`, generated
by instances of :class:`DSLType` which themselves originated from
a :class:`DSLSchema` class.
:param \*operations: the GraphQL operations and fragments
:type \*operations: DSLQuery, DSLMutation, DSLSubscription, DSLFragment
:param \**operations_with_name: the GraphQL operations with an operation name
:type \**operations_with_name: DSLQuery, DSLMutation, DSLSubscription
:return: a Document which can be later executed or subscribed by a
:class:`Client <gql.client.Client>`, by an
:class:`async session <gql.client.AsyncClientSession>` or by a
:class:`sync session <gql.client.SyncClientSession>`
:raises TypeError: if an argument is not an instance of :class:`DSLExecutable`
:raises AttributeError: if a type has not been provided in a :class:`DSLFragment`
"""
# Concatenate operations without and with name
all_operations: Tuple["DSLExecutable", ...] = (
*operations,
*(operation for operation in operations_with_name.values()),
)
# Set the operation name
for name, operation in operations_with_name.items():
operation.name = name
# Check the type
for operation in all_operations:
if not isinstance(operation, DSLExecutable):
raise TypeError(
"Operations should be instances of DSLExecutable "
"(DSLQuery, DSLMutation, DSLSubscription or DSLFragment).\n"
f"Received: {type(operation)}."
)
return DocumentNode(
definitions=[operation.executable_ast for operation in all_operations]
)
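# Illustrative usage sketch (added comment, not part of the original module;
# `client` and the `hero`/`name` fields are assumptions about the caller's schema):
#
#     ds = DSLSchema(client.schema)
#     query = dsl_gql(DSLQuery(ds.Query.hero.select(ds.Character.name)))
#     result = client.execute(query)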
class DSLSchema:
"""The DSLSchema is the root of the DSL code.
Attributes of the DSLSchema class are generated automatically
with the `__getattr__` dunder method in order to generate
instances of :class:`DSLType`
"""
def __init__(self, schema: GraphQLSchema):
"""Initialize the DSLSchema with the given schema.
:param schema: a GraphQL Schema provided locally or fetched using
an introspection query. Usually `client.schema`
:type schema: GraphQLSchema
:raises TypeError: if the argument is not an instance of :class:`GraphQLSchema`
"""
if not isinstance(schema, GraphQLSchema):
raise TypeError(
f"DSLSchema needs a schema as parameter. Received: {type(schema)}"
)
self._schema: GraphQLSchema = schema
def __getattr__(self, name: str) -> "DSLType":
type_def: Optional[GraphQLNamedType] = self._schema.get_type(name)
if type_def is None:
raise AttributeError(f"Type '{name}' not found in the schema!")
if not isinstance(type_def, (GraphQLObjectType, GraphQLInterfaceType)):
raise AttributeError(
f'Type "{name} ({type_def!r})" is not valid as an attribute of'
" DSLSchema. Only Object types or Interface types are accepted."
)
return DSLType(type_def, self)
class DSLSelector(ABC):
"""DSLSelector is an abstract class which defines the
:meth:`select <gql.dsl.DSLSelector.select>` method to select
children fields in the query.
Inherited by
:class:`DSLRootFieldSelector <gql.dsl.DSLRootFieldSelector>`,
:class:`DSLFieldSelector <gql.dsl.DSLFieldSelector>`
:class:`DSLFragmentSelector <gql.dsl.DSLFragmentSelector>`
"""
selection_set: SelectionSetNode
def __init__(
self,
*fields: "DSLSelectable",
**fields_with_alias: "DSLSelectableWithAlias",
):
""":meta private:"""
self.selection_set = SelectionSetNode(selections=())
if fields or fields_with_alias:
self.select(*fields, **fields_with_alias)
@abstractmethod
def is_valid_field(self, field: "DSLSelectable") -> bool:
raise NotImplementedError(
"Any DSLSelector subclass must have a is_valid_field method"
) # pragma: no cover
def select(
self,
*fields: "DSLSelectable",
**fields_with_alias: "DSLSelectableWithAlias",
):
r"""Select the fields which should be added.
:param \*fields: fields or fragments
:type \*fields: DSLSelectable
:param \**fields_with_alias: fields or fragments with alias as key
:type \**fields_with_alias: DSLSelectable
:raises TypeError: if an argument is not an instance of :class:`DSLSelectable`
:raises GraphQLError: if an argument is not a valid field
"""
# Concatenate fields without and with alias
added_fields: Tuple["DSLSelectable", ...] = DSLField.get_aliased_fields(
fields, fields_with_alias
)
# Check that each field is valid
for field in added_fields:
if not isinstance(field, DSLSelectable):
raise TypeError(
"Fields should be instances of DSLSelectable. "
f"Received: {type(field)}"
)
if not self.is_valid_field(field):
raise GraphQLError(f"Invalid field for {self!r}: {field!r}")
# Get a list of AST Nodes for each added field
added_selections: Tuple[
Union[FieldNode, InlineFragmentNode, FragmentSpreadNode], ...
] = tuple(field.ast_field for field in added_fields)
# Update the current selection list with new selections
self.selection_set.selections = self.selection_set.selections + added_selections
log.debug(f"Added fields: {added_fields} in {self!r}")
class DSLExecutable(DSLSelector):
"""Interface for the root elements which can be executed
in the :func:`dsl_gql <gql.dsl.dsl_gql>` function
Inherited by
:class:`DSLOperation <gql.dsl.DSLOperation>` and
:class:`DSLFragment <gql.dsl.DSLFragment>`
"""
variable_definitions: "DSLVariableDefinitions"
name: Optional[str]
selection_set: SelectionSetNode
@property
@abstractmethod
def executable_ast(self):
"""Generates the ast for :func:`dsl_gql <gql.dsl.dsl_gql>`."""
raise NotImplementedError(
"Any DSLExecutable subclass must have executable_ast property"
) # pragma: no cover
def __init__(
self,
*fields: "DSLSelectable",
**fields_with_alias: "DSLSelectableWithAlias",
):
r"""Given arguments of type :class:`DSLSelectable` containing GraphQL requests,
generate an operation which can be converted to a Document
using the :func:`dsl_gql <gql.dsl.dsl_gql>`.
The fields arguments should be either be fragments or
fields of root GraphQL types
(Query, Mutation or Subscription) and correspond to the
operation_type of this operation.
:param \*fields: root fields or fragments
:type \*fields: DSLSelectable
:param \**fields_with_alias: root fields or fragments with alias as key
:type \**fields_with_alias: DSLSelectable
:raises TypeError: if an argument is not an instance of :class:`DSLSelectable`
:raises AssertionError: if an argument is not a field which correspond
to the operation type
"""
self.name = None
self.variable_definitions = DSLVariableDefinitions()
DSLSelector.__init__(self, *fields, **fields_with_alias)
class DSLRootFieldSelector(DSLSelector):
"""Class used to define the
:meth:`is_valid_field <gql.dsl.DSLRootFieldSelector.is_valid_field>` method
for root fields for the :meth:`select <gql.dsl.DSLSelector.select>` method.
Inherited by
:class:`DSLOperation <gql.dsl.DSLOperation>`
"""
def is_valid_field(self, field: "DSLSelectable") -> bool:
"""Check that a field is valid for a root field.
For operations, the fields arguments should be fields of root GraphQL types
(Query, Mutation or Subscription) and correspond to the
operation_type of this operation.
the :code:`__typename` field can only be added to Query or Mutation.
the :code:`__schema` and :code:`__type` field can only be added to Query.
"""
assert isinstance(self, DSLOperation)
operation_name = self.operation_type.name
if isinstance(field, DSLMetaField):
if field.name in ["__schema", "__type"]:
return operation_name == "QUERY"
if field.name == "__typename":
return operation_name != "SUBSCRIPTION"
elif isinstance(field, DSLField):
assert field.dsl_type is not None
schema = field.dsl_type._dsl_schema._schema
root_type = None
if operation_name == "QUERY":
root_type = schema.query_type
elif operation_name == "MUTATION":
root_type = schema.mutation_type
elif operation_name == "SUBSCRIPTION":
root_type = schema.subscription_type
if root_type is None:
log.error(
f"Root type of type {operation_name} not found in the schema!"
)
return False
return field.parent_type.name == root_type.name
return False
class DSLOperation(DSLExecutable, DSLRootFieldSelector):
"""Interface for GraphQL operations.
Inherited by
:class:`DSLQuery <gql.dsl.DSLQuery>`,
:class:`DSLMutation <gql.dsl.DSLMutation>` and
:class:`DSLSubscription <gql.dsl.DSLSubscription>`
"""
operation_type: OperationType
@property
def executable_ast(self) -> OperationDefinitionNode:
"""Generates the ast for :func:`dsl_gql <gql.dsl.dsl_gql>`."""
return OperationDefinitionNode(
operation=OperationType(self.operation_type),
selection_set=self.selection_set,
variable_definitions=self.variable_definitions.get_ast_definitions(),
**({"name": NameNode(value=self.name)} if self.name else {}),
)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}>"
class DSLQuery(DSLOperation):
operation_type = OperationType.QUERY
class DSLMutation(DSLOperation):
operation_type = OperationType.MUTATION
class DSLSubscription(DSLOperation):
operation_type = OperationType.SUBSCRIPTION
class DSLVariable:
"""The DSLVariable represents a single variable defined in a GraphQL operation
Instances of this class are generated for you automatically as attributes
of the :class:`DSLVariableDefinitions`
The type of the variable is set by the :class:`DSLField` instance that receives it
in the :meth:`args <gql.dsl.DSLField.args>` method.
"""
def __init__(self, name: str):
""":meta private:"""
self.name = name
self.ast_variable_type: Optional[TypeNode] = None
self.ast_variable_name = VariableNode(name=NameNode(value=self.name))
self.default_value = None
self.type: Optional[GraphQLInputType] = None
def to_ast_type(self, type_: GraphQLInputType) -> TypeNode:
if is_wrapping_type(type_):
if isinstance(type_, GraphQLList):
return ListTypeNode(type=self.to_ast_type(type_.of_type))
elif isinstance(type_, GraphQLNonNull):
return NonNullTypeNode(type=self.to_ast_type(type_.of_type))
assert isinstance(
type_, (GraphQLScalarType, GraphQLEnumType, GraphQLInputObjectType)
)
return NamedTypeNode(name=NameNode(value=type_.name))
def set_type(self, type_: GraphQLInputType) -> "DSLVariable":
self.type = type_
self.ast_variable_type = self.to_ast_type(type_)
return self
def default(self, default_value: Any) -> "DSLVariable":
self.default_value = default_value
return self
class DSLVariableDefinitions:
"""The DSLVariableDefinitions represents variable definitions in a GraphQL operation
Instances of this class have to be created and set as the `variable_definitions`
attribute of a DSLOperation instance
Attributes of the DSLVariableDefinitions class are generated automatically
with the `__getattr__` dunder method in order to generate
instances of :class:`DSLVariable`, that can then be used as values
in the :meth:`args <gql.dsl.DSLField.args>` method.
"""
def __init__(self):
""":meta private:"""
self.variables: Dict[str, DSLVariable] = {}
def __getattr__(self, name: str) -> "DSLVariable":
if name not in self.variables:
self.variables[name] = DSLVariable(name)
return self.variables[name]
def get_ast_definitions(self) -> Tuple[VariableDefinitionNode, ...]:
"""
:meta private:
Return a list of VariableDefinitionNodes for each variable with a type
"""
return tuple(
VariableDefinitionNode(
type=var.ast_variable_type,
variable=var.ast_variable_name,
default_value=None
if var.default_value is None
else ast_from_value(var.default_value, var.type),
)
for var in self.variables.values()
if var.type is not None # only variables used
)
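# Illustrative sketch of variable definitions (added comment, not part of the
# original module; the `episode` argument is an assumption about the schema):
#
#     var = DSLVariableDefinitions()
#     op = DSLQuery(ds.Query.hero.args(episode=var.episode).select(ds.Character.name))
#     op.variable_definitions = var
#     query = dsl_gql(op)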
class DSLType:
"""The DSLType represents a GraphQL type for the DSL code.
It can be a root type (Query, Mutation or Subscription).
Or it can be any other object type (Human in the StarWars schema).
Or it can be an interface type (Character in the StarWars schema).
Instances of this class are generated for you automatically as attributes
of the :class:`DSLSchema`
Attributes of the DSLType class are generated automatically
with the `__getattr__` dunder method in order to generate
instances of :class:`DSLField`
"""
def __init__(
self,
graphql_type: Union[GraphQLObjectType, GraphQLInterfaceType],
dsl_schema: DSLSchema,
):
"""Initialize the DSLType with the GraphQL type.
.. warning::
Don't instantiate this class yourself.
Use attributes of the :class:`DSLSchema` instead.
:param graphql_type: the GraphQL type definition from the schema
:param dsl_schema: reference to the DSLSchema which created this type
"""
self._type: Union[GraphQLObjectType, GraphQLInterfaceType] = graphql_type
self._dsl_schema = dsl_schema
log.debug(f"Creating {self!r})")
def __getattr__(self, name: str) -> "DSLField":
camel_cased_name = to_camel_case(name)
if name in self._type.fields:
formatted_name = name
field = self._type.fields[name]
elif camel_cased_name in self._type.fields:
formatted_name = camel_cased_name
field = self._type.fields[camel_cased_name]
else:
raise AttributeError(
f"Field {name} does not exist in type {self._type.name}."
)
return DSLField(formatted_name, self._type, field, self)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self._type!r}>"
class DSLSelectable(ABC):
"""DSLSelectable is an abstract class which indicates that
the subclasses can be used as arguments of the
:meth:`select <gql.dsl.DSLSelector.select>` method.
Inherited by
:class:`DSLField <gql.dsl.DSLField>`,
:class:`DSLFragment <gql.dsl.DSLFragment>`
:class:`DSLInlineFragment <gql.dsl.DSLInlineFragment>`
"""
ast_field: Union[FieldNode, InlineFragmentNode, FragmentSpreadNode]
@staticmethod
def get_aliased_fields(
fields: Iterable["DSLSelectable"],
fields_with_alias: Dict[str, "DSLSelectableWithAlias"],
) -> Tuple["DSLSelectable", ...]:
"""
:meta private:
Concatenate all the fields (with or without alias) in a Tuple.
Set the requested alias for the fields with alias.
"""
return (
*fields,
*(field.alias(alias) for alias, field in fields_with_alias.items()),
)
def __str__(self) -> str:
return print_ast(self.ast_field)
class DSLFragmentSelector(DSLSelector):
"""Class used to define the
:meth:`is_valid_field <gql.dsl.DSLFragmentSelector.is_valid_field>` method
for fragments for the :meth:`select <gql.dsl.DSLSelector.select>` method.
Inherited by
:class:`DSLFragment <gql.dsl.DSLFragment>`,
:class:`DSLInlineFragment <gql.dsl.DSLInlineFragment>`
"""
def is_valid_field(self, field: DSLSelectable) -> bool:
"""Check that a field is valid."""
assert isinstance(self, (DSLFragment, DSLInlineFragment))
if isinstance(field, (DSLFragment, DSLInlineFragment)):
return True
assert isinstance(field, DSLField)
if isinstance(field, DSLMetaField):
return field.name == "__typename"
fragment_type = self._type
assert fragment_type is not None
if field.name in fragment_type.fields.keys():
return fragment_type.fields[field.name].type == field.field.type
return False
class DSLFieldSelector(DSLSelector):
"""Class used to define the
:meth:`is_valid_field <gql.dsl.DSLFieldSelector.is_valid_field>` method
for fields for the :meth:`select <gql.dsl.DSLSelector.select>` method.
Inherited by
:class:`DSLField <gql.dsl.DSLField>`,
"""
def is_valid_field(self, field: DSLSelectable) -> bool:
"""Check that a field is valid."""
assert isinstance(self, DSLField)
if isinstance(field, (DSLFragment, DSLInlineFragment)):
return True
assert isinstance(field, DSLField)
if isinstance(field, DSLMetaField):
return field.name == "__typename"
parent_type = get_named_type(self.field.type)
if not isinstance(parent_type, (GraphQLInterfaceType, GraphQLObjectType)):
return False
if field.name in parent_type.fields.keys():
return parent_type.fields[field.name].type == field.field.type
return False
class DSLSelectableWithAlias(DSLSelectable):
"""DSLSelectableWithAlias is an abstract class which indicates that
the subclasses can be selected with an alias.
"""
ast_field: FieldNode
def alias(self, alias: str) -> "DSLSelectableWithAlias":
"""Set an alias
.. note::
You can also pass the alias directly at the
:meth:`select <gql.dsl.DSLSelector.select>` method.
:code:`ds.Query.human.select(my_name=ds.Character.name)` is equivalent to:
:code:`ds.Query.human.select(ds.Character.name.alias("my_name"))`
:param alias: the alias
:type alias: str
:return: itself
"""
self.ast_field.alias = NameNode(value=alias)
return self
class DSLField(DSLSelectableWithAlias, DSLFieldSelector):
"""The DSLField represents a GraphQL field for the DSL code.
Instances of this class are generated for you automatically as attributes
of the :class:`DSLType`
If this field contains children fields, then you need to select which ones
you want in the request using the :meth:`select <gql.dsl.DSLField.select>`
method.
"""
_type: Union[GraphQLObjectType, GraphQLInterfaceType]
ast_field: FieldNode
field: GraphQLField
def __init__(
self,
name: str,
parent_type: Union[GraphQLObjectType, GraphQLInterfaceType],
field: GraphQLField,
dsl_type: Optional[DSLType] = None,
):
"""Initialize the DSLField.
.. warning::
Don't instantiate this class yourself.
Use attributes of the :class:`DSLType` instead.
:param name: the name of the field
:param parent_type: the GraphQL type definition from the schema of the
parent type of the field
:param field: the GraphQL field definition from the schema
:param dsl_type: reference of the DSLType instance which created this field
"""
self.parent_type = parent_type
self.field = field
self.ast_field = FieldNode(name=NameNode(value=name), arguments=())
self.dsl_type = dsl_type
log.debug(f"Creating {self!r}")
DSLSelector.__init__(self)
@property
def name(self):
""":meta private:"""
return self.ast_field.name.value
def __call__(self, **kwargs) -> "DSLField":
return self.args(**kwargs)
def args(self, **kwargs) -> "DSLField":
r"""Set the arguments of a field
The arguments are parsed to be stored in the AST of this field.
.. note::
You can also call the field directly with your arguments.
:code:`ds.Query.human(id=1000)` is equivalent to:
:code:`ds.Query.human.args(id=1000)`
:param \**kwargs: the arguments (keyword=value)
:return: itself
:raises KeyError: if any of the provided arguments does not exist
for this field.
"""
assert self.ast_field.arguments is not None
self.ast_field.arguments = self.ast_field.arguments + tuple(
ArgumentNode(
name=NameNode(value=name),
value=ast_from_value(value, self._get_argument(name).type),
)
for name, value in kwargs.items()
)
log.debug(f"Added arguments {kwargs} in field {self!r})")
return self
def _get_argument(self, name: str) -> GraphQLArgument:
"""Method used to return the GraphQLArgument definition
of an argument from its name.
:raises KeyError: if the provided argument does not exist
for this field.
"""
arg = self.field.args.get(name)
if arg is None:
raise KeyError(f"Argument {name} does not exist in {self.field}.")
return arg
def select(
self, *fields: "DSLSelectable", **fields_with_alias: "DSLSelectableWithAlias"
) -> "DSLField":
"""Calling :meth:`select <gql.dsl.DSLSelector.select>` method with
corrected typing hints
"""
super().select(*fields, **fields_with_alias)
self.ast_field.selection_set = self.selection_set
return self
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.parent_type.name}" f"::{self.name}>"
class DSLMetaField(DSLField):
"""DSLMetaField represents a GraphQL meta-field for the DSL code.
    Meta-fields are reserved fields in the GraphQL type system, prefixed with
    two underscores ("__") and used for introspection.
"""
meta_type = GraphQLObjectType(
"meta_field",
fields={
"__typename": GraphQLField(GraphQLString),
"__schema": GraphQLField(
cast(GraphQLObjectType, introspection_types["__Schema"])
),
"__type": GraphQLField(
cast(GraphQLObjectType, introspection_types["__Type"]),
args={"name": GraphQLArgument(type_=GraphQLNonNull(GraphQLString))},
),
},
)
def __init__(self, name: str):
"""Initialize the meta-field.
        :param name: the meta-field name, one of __typename, __schema or __type
"""
try:
field = self.meta_type.fields[name]
except KeyError:
raise GraphQLError(f'Invalid meta-field "{name}"')
super().__init__(name, self.meta_type, field)
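# Illustrative sketch (added comment, not part of the original module): meta-fields
# are selected like regular fields, e.g.
#
#     DSLQuery(ds.Query.hero.select(ds.Character.name, DSLMetaField("__typename")))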
class DSLInlineFragment(DSLSelectable, DSLFragmentSelector):
"""DSLInlineFragment represents an inline fragment for the DSL code."""
_type: Union[GraphQLObjectType, GraphQLInterfaceType]
ast_field: InlineFragmentNode
def __init__(
self,
*fields: "DSLSelectable",
**fields_with_alias: "DSLSelectableWithAlias",
):
r"""Initialize the DSLInlineFragment.
:param \*fields: new children fields
:type \*fields: DSLSelectable (DSLField, DSLFragment or DSLInlineFragment)
:param \**fields_with_alias: new children fields with alias as key
:type \**fields_with_alias: DSLField
"""
log.debug(f"Creating {self!r}")
self.ast_field = InlineFragmentNode()
DSLSelector.__init__(self, *fields, **fields_with_alias)
def select(
self, *fields: "DSLSelectable", **fields_with_alias: "DSLSelectableWithAlias"
) -> "DSLInlineFragment":
"""Calling :meth:`select <gql.dsl.DSLSelector.select>` method with
corrected typing hints
"""
super().select(*fields, **fields_with_alias)
self.ast_field.selection_set = self.selection_set
return self
def on(self, type_condition: DSLType) -> "DSLInlineFragment":
"""Provides the GraphQL type of this inline fragment."""
self._type = type_condition._type
self.ast_field.type_condition = NamedTypeNode(
name=NameNode(value=self._type.name)
)
return self
def __repr__(self) -> str:
type_info = ""
try:
type_info += f" on {self._type.name}"
except AttributeError:
pass
return f"<{self.__class__.__name__}{type_info}>"
class DSLFragment(DSLSelectable, DSLFragmentSelector, DSLExecutable):
"""DSLFragment represents a named GraphQL fragment for the DSL code."""
_type: Optional[Union[GraphQLObjectType, GraphQLInterfaceType]]
ast_field: FragmentSpreadNode
name: str
def __init__(
self,
name: str,
):
r"""Initialize the DSLFragment.
:param name: the name of the fragment
:type name: str
"""
DSLExecutable.__init__(self)
self.name = name
self._type = None
log.debug(f"Creating {self!r}")
@property # type: ignore
def ast_field(self) -> FragmentSpreadNode: # type: ignore
"""ast_field property will generate a FragmentSpreadNode with the
provided name.
Note: We need to ignore the type because of
`issue #4125 of mypy <https://github.com/python/mypy/issues/4125>`_.
"""
spread_node = FragmentSpreadNode()
spread_node.name = NameNode(value=self.name)
return spread_node
def select(
self, *fields: "DSLSelectable", **fields_with_alias: "DSLSelectableWithAlias"
) -> "DSLFragment":
"""Calling :meth:`select <gql.dsl.DSLSelector.select>` method with
corrected typing hints
"""
if self._type is None:
raise AttributeError(
"Missing type condition. Please use .on(type_condition) method"
)
super().select(*fields, **fields_with_alias)
return self
def on(self, type_condition: DSLType) -> "DSLFragment":
"""Provides the GraphQL type of this fragment.
:param type_condition: the provided type
:type type_condition: DSLType
"""
self._type = type_condition._type
return self
@property
def executable_ast(self) -> FragmentDefinitionNode:
"""Generates the ast for :func:`dsl_gql <gql.dsl.dsl_gql>`.
:raises AttributeError: if a type has not been provided
"""
assert self.name is not None
if self._type is None:
raise AttributeError(
"Missing type condition. Please use .on(type_condition) method"
)
return FragmentDefinitionNode(
type_condition=NamedTypeNode(name=NameNode(value=self._type.name)),
selection_set=self.selection_set,
variable_definitions=self.variable_definitions.get_ast_definitions(),
name=NameNode(value=self.name),
)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.name!s}>"
|
98fda4b605638f181e89a75066dc361bd3d2f8b9
|
4d913c96c6c4033783402e8e72afbc04bc87e6d0
|
/nautilus/extensions/exif-columns.py
|
ed54c7a97604f5b112dd1cdec37cf2fd399c82dc
|
[] |
no_license
|
NicolasBernaerts/ubuntu-scripts
|
9ae478dcc81af85a472498f247bfccf096431316
|
492ed52351b9b80ac1f99ed1b55e4f267142af88
|
refs/heads/master
| 2023-07-22T01:40:00.476642
| 2023-07-11T06:41:44
| 2023-07-11T06:41:44
| 20,286,893
| 205
| 115
| null | 2022-06-09T07:05:01
| 2014-05-29T08:09:50
|
Shell
|
UTF-8
|
Python
| false
| false
| 6,660
|
py
|
exif-columns.py
|
#!/usr/bin/env python3
# ---------------------------------------------------
# Nautilus extension to add EXIF specific columns
# Dependency :
# - gir1.2-gexiv2-0.10
# Procedure :
# http://www.bernaerts-nicolas.fr/linux/xxx...
#
# Revision history :
# 20/09/2016, V1.0 - Creation by N. Bernaerts
# 25/04/2020, v2.0 - rewrite for python3 compatibility
# ---------------------------------------------------
# -------------------
# Import libraries
# -------------------
import gi
gi.require_version("GExiv2", "0.10")
from urllib.parse import unquote
from gi.repository import Nautilus, GObject
from gi.repository.GExiv2 import Metadata
# -------------------
# Column extension
# -------------------
class ExifColumnExtension(GObject.GObject, Nautilus.ColumnProvider, Nautilus.InfoProvider):
def __init__(self): pass
# -----------------------------
# List of available columns
# -----------------------------
def get_columns(self):
return (
Nautilus.Column(name="NautilusPython::ExifWidth", attribute="exif_width", label=" Width ", description="Image width"),
Nautilus.Column(name="NautilusPython::ExifHeight", attribute="exif_height", label=" Height ", description="Image height"),
Nautilus.Column(name="NautilusPython::ExifAperture", attribute="exif_apert", label="Aperture", description="Aperture"),
Nautilus.Column(name="NautilusPython::ExifFocal", attribute="exif_focal", label="Focal", description="Focal length (35mm)"),
Nautilus.Column(name="NautilusPython::ExifCity", attribute="exif_city", label="City", description="City"),
Nautilus.Column(name="NautilusPython::ExifCountry", attribute="exif_country", label="Country", description="Country"),
Nautilus.Column(name="NautilusPython::ExifGPS", attribute="exif_gps", label="GPS", description="GPS"),
Nautilus.Column(name="NautilusPython::ExifMaker", attribute="exif_maker", label="Manufacturer", description="Camera manufacturer"),
Nautilus.Column(name="NautilusPython::ExifModel", attribute="exif_model", label="Model", description="Camera model"),
)
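    # Note (added for clarity, not in the original script): once nautilus-python
    # loads this extension, these columns can be enabled in the Nautilus list
    # view; the exact menu location depends on the Nautilus version.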
# ------------------------
# Retrieve file values
# ------------------------
def update_file_info(self, file):
# test file type
if file.get_uri_scheme() != 'file': return
# read data only if image file
mimetype = file.get_mime_type().split('/')
if mimetype[0] == "image":
# format filename
filename = unquote(file.get_uri()[7:])
# get metadata
self.tags = Metadata()
self.tags.open_path(filename)
# image width
tag_value = ""
if self.tags.has_tag('Exif.Photo.PixelXDimension'): tag_value = self.tags.get_tag_string('Exif.Photo.PixelXDimension')
if tag_value == "" and self.tags.has_tag('Exif.Image.ImageWidth'): tag_value = self.tags.get_tag_string('Exif.Image.ImageWidth')
file.add_string_attribute('exif_width', tag_value)
# image height
tag_value = ""
if self.tags.has_tag('Exif.Photo.PixelYDimension'): tag_value = self.tags.get_tag_string('Exif.Photo.PixelYDimension')
if tag_value == "" and self.tags.has_tag('Exif.Image.ImageLength'): tag_value = self.tags.get_tag_string('Exif.Image.ImageLength')
file.add_string_attribute('exif_height', tag_value)
# camera manufacturer
tag_value = ""
if self.tags.has_tag('Xmp.VendorInfo.Manufacturer'): tag_value = self.tags.get_tag_interpreted_string('Xmp.VendorInfo.Manufacturer')
if tag_value == "" and self.tags.has_tag('Exif.Image.Make'): tag_value = self.tags.get_tag_interpreted_string('Exif.Image.Make')
file.add_string_attribute('exif_maker', tag_value)
# camera model
tag_value = ""
if self.tags.has_tag('Xmp.VendorInfo.Model'): tag_value = self.tags.get_tag_interpreted_string('Xmp.VendorInfo.Model')
if tag_value == "" and self.tags.has_tag('Exif.Image.Model'): tag_value = self.tags.get_tag_interpreted_string('Exif.Image.Model')
file.add_string_attribute('exif_model', tag_value)
# camera focal length
tag_value = ""
if self.tags.has_tag('Xmp.Exif.FocalLengthIn35mmFormat'): tag_value = self.tags.get_tag_interpreted_string('Xmp.Exif.FocalLengthIn35mmFormat')
if tag_value == "" and self.tags.has_tag('Exif.Photo.FocalLengthIn35mmFilm'): tag_value = self.tags.get_tag_interpreted_string('Exif.Photo.FocalLengthIn35mmFilm')
if tag_value == "" and self.tags.has_tag('Exif.Photo.FocalLength'): tag_value = self.tags.get_tag_interpreted_string('Exif.Photo.FocalLength')
file.add_string_attribute('exif_focal', tag_value)
# camera aperture
tag_value = ""
if self.tags.has_tag('Xmp.Exif.ApertureValue'): tag_value = self.tags.get_tag_interpreted_string('Xmp.Exif.ApertureValue')
if tag_value == "" and self.tags.has_tag('Exif.Photo.ApertureValue'): tag_value = self.tags.get_tag_interpreted_string('Exif.Photo.ApertureValue')
if tag_value == "" and self.tags.has_tag('Exif.Photo.FNumber'): tag_value = self.tags.get_tag_interpreted_string('Exif.Photo.FNumber')
file.add_string_attribute('exif_apert', tag_value)
# city tag
tag_value = ""
if self.tags.has_tag('Xmp.City'): tag_value = self.tags.get_tag_interpreted_string('Xmp.City')
if tag_value == "" and self.tags.has_tag('Xmp.photoshop.City'): tag_value = self.tags.get_tag_interpreted_string('Xmp.photoshop.City')
if tag_value == "" and self.tags.has_tag('Iptc.City'): tag_value = self.tags.get_tag_interpreted_string('Iptc.City')
file.add_string_attribute('exif_city', tag_value)
# country tag
tag_value = ""
if self.tags.has_tag('Xmp.CountryName'): tag_value = self.tags.get_tag_interpreted_string('Xmp.CountryName')
if tag_value == "" and self.tags.has_tag('Xmp.photoshop.Country'): tag_value = self.tags.get_tag_interpreted_string('Xmp.photoshop.Country')
file.add_string_attribute('exif_country', tag_value)
# GPS tag
tag_value = "no"
if self.tags.has_tag('Xmp.Exif.GPSLatitude'): tag_value = "yes"
if tag_value == "no" and self.tags.has_tag('Xmp.LocationDetails.GPSLatitude'): tag_value = "yes"
if tag_value == "no" and self.tags.has_tag('Exif.GPSInfo.GPSLatitude'): tag_value = "yes"
file.add_string_attribute('exif_gps', tag_value)
# else, file is not an image
else:
file.add_string_attribute('exif_maker', "")
file.add_string_attribute('exif_model', "")
file.add_string_attribute('exif_focal', "")
file.add_string_attribute('exif_city', "")
file.add_string_attribute('exif_country', "")
file.add_string_attribute('exif_gps', "")
|
5c5af3cfa38b66cb8259ffab3f8fad7bfdf81680
|
cfb2a8652fe0afbcbbf2287f4f736ff85ce47d30
|
/tests/sample7.py
|
13c8f1572321dafbd600f5beefdf1272c03f09b9
|
[
"BSD-2-Clause"
] |
permissive
|
ionelmc/python-hunter
|
238ad366c9aae00cf1a249fd90993f31404e7087
|
cfae650dd2b7a89e5bf9eb81b109f268397c45e9
|
refs/heads/master
| 2023-08-14T23:47:51.328651
| 2023-04-26T09:11:54
| 2023-04-26T09:11:54
| 32,343,292
| 800
| 44
|
BSD-2-Clause
| 2022-09-09T19:31:05
| 2015-03-16T18:03:16
|
Python
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
sample7.py
|
from __future__ import print_function
import os
import sys
def one():
for i in range(1): # one
two()
def two():
for i in range(1): # two
three()
def three():
for i in range(1): # three
four()
def four():
for i in range(1): # four
five()
def five():
in_five = 1
for i in range(1): # five
return i
if __name__ == '__main__':
one()
|
18795767ee04a2e3912c8771e908ff4854a22af7
|
8d9af25fb8878435de994c182a8d7fa6424a8122
|
/tableauserverclient/server/endpoint/tasks_endpoint.py
|
ad1702f58b563e89baa73d707c06ff2faafcecb7
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
tableau/server-client-python
|
a0f015b3dffdba1f403120a446d2ac9ce29fdaca
|
307d8a20a30f32c1ce615cca7c6a78b9b9bff081
|
refs/heads/master
| 2023-08-30T04:39:34.860319
| 2023-04-24T20:08:23
| 2023-04-24T20:08:23
| 62,177,327
| 607
| 429
|
MIT
| 2023-09-12T03:48:49
| 2016-06-28T22:05:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,154
|
py
|
tasks_endpoint.py
|
import logging
from .endpoint import Endpoint, api
from .exceptions import MissingRequiredFieldError
from tableauserverclient.models import TaskItem, PaginationItem
from tableauserverclient.server import RequestFactory
from tableauserverclient.helpers.logging import logger
class Tasks(Endpoint):
@property
def baseurl(self):
return "{0}/sites/{1}/tasks".format(self.parent_srv.baseurl, self.parent_srv.site_id)
def __normalize_task_type(self, task_type):
"""
        The word for extract refresh used in the API URL is "extractRefreshes".
        It is different from the tag "extractRefresh" used in the request body.
"""
if task_type == TaskItem.Type.ExtractRefresh:
return "{}es".format(task_type)
else:
return task_type
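    # e.g. TaskItem.Type.ExtractRefresh ("extractRefresh") becomes "extractRefreshes"
    # in the URL path (comment added for clarity, not in the original module).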
@api(version="2.6")
def get(self, req_options=None, task_type=TaskItem.Type.ExtractRefresh):
if task_type == TaskItem.Type.DataAcceleration:
self.parent_srv.assert_at_least_version("3.8", "Data Acceleration Tasks")
logger.info("Querying all {} tasks for the site".format(task_type))
url = "{0}/{1}".format(self.baseurl, self.__normalize_task_type(task_type))
server_response = self.get_request(url, req_options)
pagination_item = PaginationItem.from_response(server_response.content, self.parent_srv.namespace)
all_tasks = TaskItem.from_response(server_response.content, self.parent_srv.namespace, task_type)
return all_tasks, pagination_item
@api(version="2.6")
def get_by_id(self, task_id):
if not task_id:
error = "No Task ID provided"
raise ValueError(error)
logger.info("Querying a single task by id ({})".format(task_id))
url = "{}/{}/{}".format(
self.baseurl,
self.__normalize_task_type(TaskItem.Type.ExtractRefresh),
task_id,
)
server_response = self.get_request(url)
return TaskItem.from_response(server_response.content, self.parent_srv.namespace)[0]
@api(version="2.6")
def run(self, task_item):
if not task_item.id:
error = "Task item missing ID."
raise MissingRequiredFieldError(error)
url = "{0}/{1}/{2}/runNow".format(
self.baseurl,
self.__normalize_task_type(TaskItem.Type.ExtractRefresh),
task_item.id,
)
run_req = RequestFactory.Task.run_req(task_item)
server_response = self.post_request(url, run_req)
return server_response.content # Todo add typing
# Delete 1 task by id
@api(version="3.6")
def delete(self, task_id, task_type=TaskItem.Type.ExtractRefresh):
if task_type == TaskItem.Type.DataAcceleration:
self.parent_srv.assert_at_least_version("3.8", "Data Acceleration Tasks")
if not task_id:
error = "No Task ID provided"
raise ValueError(error)
url = "{0}/{1}/{2}".format(self.baseurl, self.__normalize_task_type(task_type), task_id)
self.delete_request(url)
logger.info("Deleted single task (ID: {0})".format(task_id))
|
524b99149e8db016605ee52e984e4f08037c85a3
|
110044654f706e920380dad2779bb32a77f1f26f
|
/test/gnutools.py
|
eb0d8476132e6530a131ec5daf77fea8d1c64dcc
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SCons/scons
|
89327bb9635cee6e7cc59249edca9cd859d7d1ff
|
b2a7d7066a2b854460a334a5fe737ea389655e6e
|
refs/heads/master
| 2023-09-01T19:37:03.603772
| 2023-08-28T04:32:42
| 2023-08-28T04:32:42
| 104,670,160
| 1,827
| 342
|
MIT
| 2023-09-14T15:13:21
| 2017-09-24T19:23:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,804
|
py
|
gnutools.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Testing the gnu tool chain, i.e. the tools 'gcc', 'g++' and 'gnulink'.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
import sys
_python_ = TestSCons._python_
_exe = TestSCons._exe
def dll(s):
return TestSCons.dll_ + s + TestSCons._dll
test = TestSCons.TestSCons()
test.subdir('gnutools')
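# The two fake compiler scripts written below just record the options they were
# invoked with (plus the contents of their input files) into the output file, so
# the checks at the bottom of this test can verify which tool SCons ran and with
# which flags.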
test.write(['gnutools','mygcc.py'], """
import getopt
import sys
try:
cmd_opts, args = getopt.getopt(sys.argv[1:], 'f:s:co:', [])
except getopt.GetoptError:
# we may be called with --version, just quit if so
sys.exit(0)
out = None
opt_string = ''
for opt, arg in cmd_opts:
if opt == '-o': out = arg
else: opt_string = opt_string + ' ' + opt + arg
with open(out, 'w') as ofp:
ofp.write('gcc ' + opt_string + '\\n')
for a in args:
with open(a, 'r') as ifp:
ofp.write(ifp.read())
sys.exit(0)
""")
test.write(['gnutools','myg++.py'], """
import getopt
import sys
try:
cmd_opts, args = getopt.getopt(sys.argv[1:], 'f:s:co:', [])
except getopt.GetoptError:
# we may be called with --version, just quit if so
sys.exit(0)
out = None
opt_string = ''
for opt, arg in cmd_opts:
if opt == '-o': out = arg
else: opt_string = opt_string + ' ' + opt + arg
with open(out, 'w') as ofp:
ofp.write('g++ ' + opt_string + '\\n')
for a in args:
with open(a, 'r') as ifp:
ofp.write(ifp.read())
sys.exit(0)
""")
test.subdir('work1')
test.write(['work1', 'cfile1.c'],"""
/* c file 1 */
""")
test.write(['work1', 'cfile2.c'],"""
/* c file 2 */
""")
test.write(['work1', 'cppfile1.cpp'],"""
/* cpp file 1 */
""")
test.write(['work1', 'cppfile2.cpp'],"""
/* cpp file 2 */
""")
mygcc_py = test.workpath('gnutools','mygcc.py')
mygxx_py = test.workpath('gnutools','myg++.py')
test.write(['work1', 'SConstruct'],"""
env = Environment(tools=['gcc','g++','gnulink'],
CC=r'%(_python_)s %(mygcc_py)s',
CXX=r'%(_python_)s %(mygxx_py)s',
OBJSUFFIX='.o',
SHOBJSUFFIX='.os')
env.Program('c-only', Split('cfile1.c cfile2.c'))
env.Program('cpp-only', Split('cppfile1.cpp cppfile2.cpp'))
env.Program('c-and-cpp', Split('cfile1.c cppfile1.cpp'))
env.SharedLibrary('c-only', Split('cfile1.c cfile2.c'))
env.SharedLibrary('cpp-only', Split('cppfile1.cpp cppfile2.cpp'))
env.SharedLibrary('c-and-cpp', Split('cfile1.c cppfile1.cpp'))
""" % locals())
test.run(chdir='work1')
def testObject(test, obj, expect):
contents = test.read(test.workpath('work1', obj))
line1 = contents.split(b'\n')[0]
actual = b' '.join(line1.split())
if not expect == actual:
print("%s: %s != %s\n" % (obj, repr(expect), repr(actual)))
test.fail_test()
if sys.platform in ('win32', 'cygwin'):
c_fpic = b''
else:
c_fpic = b' -fPIC'
testObject(test, 'cfile1.o', b'gcc -c')
testObject(test, 'cfile2.o', b'gcc -c')
testObject(test, 'cppfile1.o', b'g++ -c')
testObject(test, 'cppfile2.o', b'g++ -c')
testObject(test, 'cfile1.os', b'gcc -c' + c_fpic)
testObject(test, 'cfile2.os', b'gcc -c' + c_fpic)
testObject(test, 'cppfile1.os', b'g++ -c' + c_fpic)
testObject(test, 'cppfile2.os', b'g++ -c' + c_fpic)
testObject(test, 'c-only' + _exe, b'gcc')
testObject(test, 'cpp-only' + _exe, b'g++')
testObject(test, 'c-and-cpp' + _exe, b'g++')
testObject(test, dll('c-only'), b'gcc -shared')
testObject(test, dll('cpp-only'), b'g++ -shared')
testObject(test, dll('c-and-cpp'), b'g++ -shared')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
8eff3a317a9217faa7bf54c9f29becb66511a18f
|
5dc35a39169d191166c44b7ac45389a14e5b0857
|
/rest/notification/list-get-example-2/list-get-example-2.8.x.py
|
53d41bdbaa75b06a2b6cb81cca2b00d9f53b4867
|
[
"MIT"
] |
permissive
|
TwilioDevEd/api-snippets
|
035b7ceaf9c03c932010919ee1747bf895d4591e
|
ca6160d8e626bdf137f783324807285bb01d006f
|
refs/heads/master
| 2023-08-17T19:20:35.751733
| 2023-08-04T18:47:58
| 2023-08-04T18:47:58
| 49,965,712
| 267
| 551
|
MIT
| 2023-09-11T14:04:34
| 2016-01-19T16:21:44
|
Java
|
UTF-8
|
Python
| false
| false
| 577
|
py
|
list-get-example-2.8.x.py
|
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
from datetime import date
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
# A list of notification objects with the properties described above
notifications = client.notifications \
.list(message_date=date(2009, 7, 6), log="1")
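# Illustrative follow-up (not part of the original snippet): each returned item is
# a NotificationInstance whose fields can be inspected, for example:
for notification in notifications:
    print(notification.sid, notification.error_code)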
|
e72be2f1841df4920bfee26ba1c808e79f5d58a7
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/MeshingApplication/tests/test_mpi_parmmg.py
|
9063dab435a8b06d4bd9d914182c1a24d798896c
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 10,361
|
py
|
test_mpi_parmmg.py
|
import os
import math
import json
import KratosMultiphysics
from KratosMultiphysics import ParallelEnvironment, IsDistributedRun
import KratosMultiphysics.MeshingApplication
import KratosMultiphysics.kratos_utilities as kratos_utilities
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.testing.utilities import ReadModelPart
def GetFilePath(fileName):
return os.path.dirname(os.path.realpath(__file__)) + "/" + fileName
class TestMPIParMmg(KratosUnittest.TestCase):
@KratosUnittest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() <= 4, "Test designed to be run with max. 4 ranks.")
def test_mpi_sphere(self):
KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)
# We create the model part
current_model = KratosMultiphysics.Model()
main_model_part = current_model.CreateModelPart("MainModelPart")
main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, 3)
# We add the variables needed
main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE)
main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE_GRADIENT)
# We import the model main_model_part
file_path = GetFilePath("/parmmg_eulerian_test/background_mesh_sphere")
ReadModelPart(file_path, main_model_part)
communicator = main_model_part.GetCommunicator().GetDataCommunicator()
for node in main_model_part.Nodes:
distance = math.sqrt(node.X**2+node.Y**2+node.Z**2) - 1.0/2.0
node.SetSolutionStepValue(KratosMultiphysics.DISTANCE,distance)
# Setting some flags on submodelparts entities arbitrarily
for cond in main_model_part.GetSubModelPart("SurfaceLoad3D_Load_on_surfaces_Auto3").Conditions:
cond.Set(KratosMultiphysics.STRUCTURE)
for elem in main_model_part.GetSubModelPart("Parts_Solid_Solid_Auto1").Elements:
elem.Set(KratosMultiphysics.VISITED)
##COMPUTE DISTANCE GRADIENT AND NODAL_H
local_gradient = KratosMultiphysics.ComputeNodalGradientProcess3D(main_model_part,
KratosMultiphysics.DISTANCE,
KratosMultiphysics.DISTANCE_GRADIENT,
KratosMultiphysics.NODAL_AREA)
local_gradient.Execute()
find_nodal_h = KratosMultiphysics.FindNodalHNonHistoricalProcess(main_model_part)
find_nodal_h.Execute()
##COMPUTE LEVEL SET METRIC
metric_parameters = KratosMultiphysics.Parameters("""
{
"minimal_size" : 0.5,
"sizing_parameters": {
"reference_variable_name" : "DISTANCE",
"boundary_layer_max_distance" : 2.0,
"interpolation" : "constant"
},
"enforce_current" : false,
"anisotropy_remeshing" : false
}
""")
metric_process = KratosMultiphysics.MeshingApplication.ComputeLevelSetSolMetricProcess3D(
main_model_part,
KratosMultiphysics.DISTANCE_GRADIENT,
metric_parameters)
metric_process.Execute()
##PERFORM REMESHING
pmmg_parameters = KratosMultiphysics.Parameters("""
{
"filename" : "output",
"save_external_files" : true,
"save_colors_files" : true,
"initialize_entities" : false,
"preserve_flags" : true,
"echo_level" : 0,
"advanced_parameters" : {
"number_of_iterations" : 4,
"no_surf_mesh" : true
}
}
""")
pmmg_parameters["filename"].SetString(GetFilePath(pmmg_parameters["filename"].GetString()))
pmmg_process = KratosMultiphysics.MeshingApplication.ParMmgProcess3D(main_model_part.GetRootModelPart(), pmmg_parameters)
pmmg_process.Execute()
reference_file_name = GetFilePath("parmmg_eulerian_test/cond_ref_map.json")
result_file_name = GetFilePath("output_step=0_"+str(communicator.Rank())+".cond.ref.json")
self._CompareColorFiles(reference_file_name, result_file_name)
reference_file_name = GetFilePath("parmmg_eulerian_test/elem_ref_map.json")
result_file_name = GetFilePath("output_step=0_"+str(communicator.Rank())+".elem.ref.json")
self._CompareColorFiles(reference_file_name, result_file_name)
result_dict_file_name=GetFilePath("parmmg_eulerian_test/reference_parmmg_spehere_mdpa_hierarchy.json")
with open(result_dict_file_name, 'r') as f:
reference_hierarchy = json.load(f)
self.CheckModelPartHierarchie(main_model_part, reference_hierarchy[str(communicator.Size())])
# Check flags are correctly set on the corresponding submodelparts only
for cond in main_model_part.Conditions:
if cond in main_model_part.GetSubModelPart("SurfaceLoad3D_Load_on_surfaces_Auto3").Conditions:
self.assertTrue(cond.Is(KratosMultiphysics.STRUCTURE))
else:
self.assertTrue(cond.IsNot(KratosMultiphysics.STRUCTURE))
for elem in main_model_part.Elements:
if elem in main_model_part.GetSubModelPart("Parts_Solid_Solid_Auto1").Elements:
self.assertTrue(elem.Is(KratosMultiphysics.VISITED))
else:
self.assertTrue(elem.IsNot(KratosMultiphysics.VISITED))
for file_name in os.listdir(GetFilePath("")):
if file_name.endswith(".json") or file_name.endswith(".mdpa") or file_name.endswith(".mesh") or file_name.endswith(".sol"):
kratos_utilities.DeleteFileIfExisting(GetFilePath(file_name))
kratos_utilities.DeleteTimeFiles(os.getcwd())
def _CompareColorFiles(self, ref_dict_filename, result_dict_file_name):
with open(ref_dict_filename, 'r') as f:
reference_values = json.load(f)
with open(result_dict_file_name, 'r') as f:
result_values = json.load(f)
self.assertEqual(len(reference_values.keys()), len(result_values.keys()))
for key_ref, key_result in zip(reference_values.keys(), result_values.keys()):
self.assertEqual(reference_values[key_ref], result_values[key_result])
def _CheckModelPart(self, ref_model_part, result_model_part):
self.assertEqual(ref_model_part.NumberOfNodes(), result_model_part.NumberOfNodes())
self.assertEqual(ref_model_part.NumberOfElements(), result_model_part.NumberOfElements())
self.assertEqual(ref_model_part.NumberOfConditions(), result_model_part.NumberOfConditions())
def CheckModelPartHierarchie(self, model_part, hierarchie):
"""Checking if the hierarchie of a ModelPart matches the expected one
This is intended to check larger models, where it is not feasible
save large mdpa-files as references
the hierarchie is a dict with the structure of the ModelPart. E.g.:
        {
            "name_model_part" : {
                "nodes": 15,
                "elements": 11,
                "conditions": 5,
                "properties": 2,
                "sub_model_parts" : {
                    "domain" : {
                        "nodes": 15,
                        "elements" : 11,
                        "properties" : 1,
                        "sub_model_parts" : {
                            "sub_domain" : {
                                "nodes" : 3,
                                "elements" : 2
                            }
                        }
                    },
                    "boundary" : {
                        "nodes": 6,
                        "conditions" : 5,
                        "properties" : 1
                    }
                }
            }
        }
"""
def CheckModelPartHierarchieNumbers(smp, smp_hierarchie):
comm = smp.GetCommunicator().GetDataCommunicator()
local_number_of_nodes = smp.GetCommunicator().LocalMesh().NumberOfNodes()
local_number_of_elem = smp.GetCommunicator().LocalMesh().NumberOfElements()
local_number_of_cond = smp.GetCommunicator().LocalMesh().NumberOfConditions()
local_number_of_prop = smp.GetCommunicator().LocalMesh().NumberOfProperties()
exp_num = smp_hierarchie.get("nodes", 0)
self.assertEqual(comm.SumAll(local_number_of_nodes), exp_num, msg='ModelPart "{}" is expected to have {} nodes but has {}'.format(smp.FullName(), exp_num, smp.NumberOfNodes()))
exp_num = smp_hierarchie.get("elements", 0)
self.assertEqual(comm.SumAll(local_number_of_elem), exp_num, msg='ModelPart "{}" is expected to have {} elements but has {}'.format(smp.FullName(), exp_num, smp.NumberOfElements()))
exp_num = smp_hierarchie.get("conditions", 0)
self.assertEqual(comm.SumAll(local_number_of_cond), exp_num, msg='ModelPart "{}" is expected to have {} conditions but has {}'.format(smp.FullName(), exp_num, smp.NumberOfConditions()))
exp_num = smp_hierarchie.get("properties", 0)
self.assertEqual(comm.SumAll(local_number_of_prop), exp_num, msg='ModelPart "{}" is expected to have {} properties but has {}'.format(smp.FullName(), exp_num, smp.NumberOfProperties()))
if "sub_model_parts" in smp_hierarchie:
smp_hierarchie = smp_hierarchie["sub_model_parts"]
for name_smp in smp_hierarchie:
self.assertTrue(smp.HasSubModelPart(name_smp), msg='ModelPart "{}" does not have SubModelPart with name "{}"'.format(smp.FullName(), name_smp))
CheckModelPartHierarchieNumbers(smp.GetSubModelPart(name_smp), smp_hierarchie[name_smp])
# check name of MainModelPart
self.assertEqual(len(hierarchie), 1)
name_main_model_part = hierarchie.__iter__().__next__()
self.assertEqual(model_part.Name, name_main_model_part)
CheckModelPartHierarchieNumbers(model_part, hierarchie[name_main_model_part])
if __name__ == '__main__':
KratosUnittest.main()
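# Note: this is a distributed test (see the skipUnless decorator above), so it is
# meant to be launched under MPI with at most 4 ranks, e.g. roughly
# "mpiexec -np 4 python3 test_mpi_parmmg.py"; the exact invocation depends on how
# Kratos and MPI are set up locally.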
|
8e5e95ed8739e9dba969bb0ad6d637b8851f5289
|
66ce19daa74e0d1e796300b27f66aedea0820b13
|
/cramming/architectures/losses.py
|
642ad0f6e5302926d5978c0f0efe516676f35277
|
[
"MIT"
] |
permissive
|
JonasGeiping/cramming
|
5a8026858fb730660959439c47c3c5e4ebf1722a
|
1397b8c8ecf11e7a8e714d17d44f44e3664af711
|
refs/heads/main
| 2023-08-17T07:18:49.370984
| 2023-08-07T14:13:26
| 2023-08-07T14:13:26
| 583,172,165
| 1,111
| 87
|
MIT
| 2023-06-13T16:49:39
| 2022-12-29T01:29:41
|
Python
|
UTF-8
|
Python
| false
| false
| 7,934
|
py
|
losses.py
|
"""Alternatives to CrossEntropyLoss. Currently not hooked into crammed_bert.py, but tried with a previous version."""
import torch
import math
class CrossEntropyWithZLoss(torch.nn.Module):
"""Cross Entropy plus logit regularization via z_loss."""
__constants__ = ["ignore_index", "z_loss_factor"]
ignore_index: int
z_loss_factor: float
def __init__(self, ignore_index=-100, z_loss_factor=1e-4):
super().__init__()
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
self.z_loss_factor = z_loss_factor
self.ignore_index = ignore_index
def forward(self, inputs, labels):
"""Is this is the optimal implementation? Is this even what is meant?
I wish there were more answers or code for PaLM
This implementation assumes that log(Z) is log(sum(exp(logits))).
The usage of log2 here is also a bit wild...
"""
z_reg = inputs.exp().sum(dim=-1).log2().sum() * self.z_loss_factor
return self.loss_fn(inputs, labels) + z_reg
class MSELoss(torch.nn.Module):
"""MSE Loss as a drop-in replacement for Cross Entropy Loss.
This implementation includes a mean reduction in batch dimension and a 1/num_classes/M reduction in classes."""
def __init__(self, ignore_index=-100):
"""Parameters as in Hui&Belkin, 2021, but k=1, and M=sqrt(C) (so maybe not really Hui&Belkin?)"""
super().__init__()
self.ignore_index = ignore_index
def forward(self, inputs, labels):
"""Is this is the optimal implementation? Could also do an index_select variation..."""
num_classes = inputs.shape[-1]
valid_mask = labels != self.ignore_index
M = math.sqrt(num_classes)
onehot_labels = self._label_to_onehot(labels[valid_mask], M, num_classes=num_classes)
return 1 / (2 * M * num_classes) * (inputs[valid_mask] - onehot_labels).pow(2).sum()
@staticmethod
@torch.jit.script
def _label_to_onehot(target, M: float = 1.0, num_classes: int = 100):
onehot_target = torch.zeros(target.shape[0], num_classes, device=target.device)
onehot_target.scatter_(1, target.view(-1, 1), M)
return onehot_target
class MSELossFast(torch.nn.Module):
"""MSE Loss as a drop-in replacement for Cross Entropy Loss. Only for 2dim inputs and 1dim labels
This implementation includes a mean reduction in batch dimension and a 1/num_classes/M reduction in classes."""
def __init__(self, ignore_index=-100):
"""Parameters as in Hui&Belkin, 2021, but k=1, and M=sqrt(C) (so maybe not really Hui&Belkin?)"""
super().__init__()
self.ignore_index = ignore_index
def forward(self, inputs, labels):
"""Is this is the optimal implementation? This at least circumvents literal 1-hot labels"""
num_examples, num_classes = inputs.shape
valid_mask = labels != self.ignore_index
M = math.sqrt(num_classes)
inputs = inputs[valid_mask]
labels = labels[valid_mask]
x_i = inputs.pow(2).sum()
x_j = inputs[torch.arange(labels.shape[-1]), labels].sum()
return 1 / (2 * M * num_classes) * (x_i - 2 * M * x_j + labels.shape[-1] * M**2)
class L1Loss(torch.nn.Module):
"""L1 Loss as a drop-in replacement for Cross Entropy Loss. Only for 2dim inputs and 1dim labels
This implementation includes a mean reduction in batch dimension and a 1/num_classes reduction in classes."""
def __init__(self, ignore_index=-100):
"""."""
super().__init__()
self.ignore_index = ignore_index
def forward(self, inputs, labels):
"""Optimal scaling is less clear for L1"""
num_classes = inputs.shape[-1]
valid_mask = labels != self.ignore_index
M = math.sqrt(num_classes)
onehot_labels = self._label_to_onehot(labels[valid_mask], float(num_classes), num_classes=num_classes)
return 1 / inputs.shape[0] / M * (inputs[valid_mask] - onehot_labels).abs().sum()
@staticmethod
@torch.jit.script
def _label_to_onehot(target, M: float = 1.0, num_classes: int = 100):
onehot_target = torch.zeros(target.shape[0], num_classes, device=target.device)
onehot_target.scatter_(1, target.view(-1, 1), M)
return onehot_target
class SzegedyLoss(torch.nn.Module):
"""Regression directly back to input embedding. Remove the decoding layer if using this loss.
As mentioned at https://twitter.com/ChrSzegedy/status/1533322132368728064?t=xz00T1YT3-WiE0id-h3MEA&s=19
"""
def __init__(self, embedding_layer, ignore_index=-100, overrelaxation=2.0):
"""Overrelax parameter is quite a bit speculative..."""
super().__init__()
self.embedding = embedding_layer
self.ignore_index = ignore_index
self.overrelaxation = overrelaxation
def forward(self, inputs, labels):
"""This really just does L2(DNN(embed(x[:,:-1]), 2.0 * stop_gradient(embed(x[:,1:]))) as quoted above"""
num_examples, num_classes = inputs.shape
valid_mask = labels != self.ignore_index
M = math.sqrt(num_classes)
inputs = inputs[valid_mask]
with torch.no_grad():
embedded_labels = self.overrelaxation * self.embedding(labels)[valid_mask]
return (inputs - embedded_labels).pow(2).sum() / labels.shape[-1] / num_classes
"""Focal Loss from https://github.com/clcarwin/focal_loss_pytorch (minimally modernized into pytorch 1.12)"""
"""
MIT License
Copyright (c) 2017 carwin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class FocalLoss(torch.nn.Module):
def __init__(self, gamma: float = 5.0, size_average: bool = True, ignore_index: int = -100):
super().__init__()
self.register_buffer("gamma", torch.as_tensor(gamma, dtype=torch.float), persistent=False)
self.size_average = size_average
self.ignore_index = ignore_index
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
valid_mask = target != self.ignore_index
        # Gather each example's own target-class log-probability (index shape [N, 1]).
        log_probs = torch.nn.functional.log_softmax(input[valid_mask], dim=-1).gather(
            1, target[valid_mask].unsqueeze(1)
        )
loss = -1 * (1 - log_probs.exp()) ** self.gamma * log_probs
if self.size_average:
return loss.mean()
else:
return loss.sum()
class IncorrectCrossEntropyLoss(torch.nn.CrossEntropyLoss):
"""CrossEntropyLoss, but only on incorrectly classified examples."""
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
incorrect_preds = input.argmax(dim=-1) != target
return torch.nn.functional.cross_entropy(
input[incorrect_preds],
target[incorrect_preds],
weight=self.weight,
ignore_index=self.ignore_index,
reduction=self.reduction,
label_smoothing=self.label_smoothing,
)
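# A minimal smoke test follows (illustrative only, not part of the original
# module): it evaluates a few of the drop-in losses on random logits.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(8, 10)
    labels = torch.randint(0, 10, (8,))
    for loss_fn in [CrossEntropyWithZLoss(), MSELoss(), L1Loss(), FocalLoss()]:
        print(type(loss_fn).__name__, float(loss_fn(logits, labels)))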
|
428fded2efa2b793eb433e98bdc7c086671d1817
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/userreports/ui/help_text.py
|
a2820d0169a9773d2881319e016d7352eed61288
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,070
|
py
|
help_text.py
|
from django.utils.translation import gettext_lazy as _
TABLE_ID = _(
"Choose something short, unique, and memorable using "
"lowercase letters, numbers, and underscores")
REPORT_ID = _(
'The System ID of the report (sometimes needed in APIs or other advanced features)'
)
DATA_SOURCE_ID = _(
'The System ID of the data source (sometimes needed in APIs or other advanced features)'
)
DISPLAY_NAME = _(
"This is what the data source will be called in navigation, page title, etc.")
DESCRIPTION = _(
"Write yourself a little note if you like, it's optional")
BASE_ITEM_EXPRESSION = _(
'You can leave this blank unless you are '
'<a target="_blank" href="'
'https://commcare-hq.readthedocs.io/ucr.html#saving-multiple-rows-per-case-form'
'">saving multiple rows per case or form</a>')
CONFIGURED_FILTER = _(
'Look at '
'<a target="_blank" href="'
'https://commcare-hq.readthedocs.io/ucr/examples.html#data-source-filters'
'">these examples</a> and '
'<a target="_blank" href="'
'https://commcare-hq.readthedocs.io/ucr.html#data-source-filtering'
'">these docs</a>')
CONFIGURED_INDICATORS = _(
'Look at '
'<a target="_blank" href="'
'https://commcare-hq.readthedocs.io/ucr/examples.html#data-source-indicators'
'">these examples</a> and '
'<a target="_blank" href="'
'https://commcare-hq.readthedocs.io/ucr.html#indicators'
'">these docs</a>')
NAMED_EXPRESSIONS = _(
'For this advanced and useful feature, '
'give a dict where the keys are the variable names you choose '
'and the values are any valid expressions. You can then reference these from filters and indicators '
'wherever an expression goes using: <code>{"type": "named", "name": "myvarname"}</code>')
NAMED_FILTER = _('These behave exactly like named expressions (see above), except the values '
'should be a valid filter, and they can be used wherever filters are used above.')
ANALYTICS = _(
'Enabling this will let this data source and the data be imported into Analytics'
)
|
5a26cc49cf22b269c6205e8b46daf3ae75f96131
|
9905b1e94470e404ea189995a905c6e2ff4ba0bf
|
/src/mplhep/plot.py
|
3c1c40b42c59817de7f3ed08b16e6bcd34c76aba
|
[
"MIT"
] |
permissive
|
scikit-hep/mplhep
|
f1f6e340146b34eee758f3f695308f3299805132
|
2b61de182bf8d47abd975adb0afaccec318d9693
|
refs/heads/master
| 2023-08-25T08:48:01.320711
| 2023-08-17T19:34:33
| 2023-08-17T19:34:33
| 184,555,939
| 157
| 60
|
MIT
| 2023-08-22T03:31:46
| 2019-05-02T09:41:43
|
Python
|
UTF-8
|
Python
| false
| false
| 42,290
|
py
|
plot.py
|
from __future__ import annotations
import collections.abc
import inspect
import warnings
from collections import OrderedDict, namedtuple
from typing import TYPE_CHECKING, Any, Union
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.offsetbox import AnchoredText
from matplotlib.transforms import Bbox
from mpl_toolkits.axes_grid1 import axes_size, make_axes_locatable
from .utils import (
Plottable,
get_histogram_axes_title,
get_plottable_protocol_bins,
hist_object_handler,
isLight,
process_histogram_parts,
)
if TYPE_CHECKING:
from numpy.typing import ArrayLike
StairsArtists = namedtuple("StairsArtists", "stairs errorbar legend_artist")
ErrorBarArtists = namedtuple("ErrorBarArtists", "errorbar")
ColormeshArtists = namedtuple("ColormeshArtists", "pcolormesh cbar text")
Hist1DArtists = Union[StairsArtists, ErrorBarArtists]
Hist2DArtists = ColormeshArtists
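# soft_update_kwargs applies the style defaults given in ``mods`` only where the
# caller has not already supplied a value and where the corresponding rcParam has
# not been customised away from its default, so explicit user and rc-file choices
# always take precedence.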
def soft_update_kwargs(kwargs, mods, rc=True):
not_default = [k for k, v in mpl.rcParamsDefault.items() if v != mpl.rcParams[k]]
respect = [
"hatch.linewidth",
"lines.linewidth",
"patch.linewidth",
"lines.linestyle",
]
aliases = {"ls": "linestyle", "lw": "linewidth"}
kwargs = {aliases[k] if k in aliases else k: v for k, v in kwargs.items()}
for key, val in mods.items():
rc_modded = (key in not_default) or (
key in [k.split(".")[-1] for k in not_default if k in respect]
)
if key not in kwargs and (rc and not rc_modded):
kwargs[key] = val
return kwargs
########################################
# Histogram plotter
def histplot(
H, # Histogram object, tuple or array
bins=None, # Bins to be supplied when h is a value array or iterable of array
*,
yerr: ArrayLike | bool | None = None,
w2=None,
w2method=None,
stack=False,
density=False,
binwnorm=None,
histtype="step",
xerr=False,
label=None,
sort=None,
edges=True,
binticks=False,
ax=None,
flow="hint",
**kwargs,
):
"""
Create a 1D histogram plot from `np.histogram`-like inputs.
Parameters
----------
H : object
Histogram object with containing values and optionally bins. Can be:
- `np.histogram` tuple
- PlottableProtocol histogram object
- `boost_histogram` classic (<0.13) histogram object
- raw histogram values, provided `bins` is specified.
Or list thereof.
bins : iterable, optional
Histogram bins, if not part of ``h``.
yerr : iterable or bool, optional
Histogram uncertainties. Following modes are supported:
- True, sqrt(N) errors or poissonian interval when ``w2`` is specified
        - shape(N) array for one sided errors or list thereof
        - shape(Nx2) array for two sided errors or list thereof
w2 : iterable, optional
Sum of the histogram weights squared for poissonian interval error
calculation
w2method: callable, optional
Function calculating CLs with signature ``low, high = fcn(w, w2)``. Here
``low`` and ``high`` are given in absolute terms, not relative to w.
Default is ``None``. If w2 has integer values (likely to be data) poisson
interval is calculated, otherwise the resulting error is symmetric
        ``sqrt(w2)``. Specifying ``poisson`` or ``sqrt`` will force that behaviour.
stack : bool, optional
Whether to stack or overlay non-axis dimension (if it exists). N.B. in
contrast to ROOT, stacking is performed in a single call aka
``histplot([h1, h2, ...], stack=True)`` as opposed to multiple calls.
density : bool, optional
If true, convert sum weights to probability density (i.e. integrates to 1
over domain of axis) (Note: this option conflicts with ``binwnorm``)
binwnorm : float, optional
If true, convert sum weights to bin-width-normalized, with unit equal to
supplied value (usually you want to specify 1.)
histtype: {'step', 'fill', 'errorbar'}, optional, default: "step"
Type of histogram to plot:
- "step": skyline/step/outline of a histogram using `plt.step <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.step.html#matplotlib.axes.Axes.step>`_
- "fill": filled histogram using `plt.fill_between <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.step.html#matplotlib.axes.Axes.step>`_
- "errorbar": single marker histogram using `plt.errorbar <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.step.html#matplotlib.axes.Axes.step>`_
xerr: bool or float, optional
Size of xerr if ``histtype == 'errorbar'``. If ``True``, bin-width will be used.
label : str or list, optional
Label for legend entry.
sort: {'label'/'l', 'yield'/'y'}, optional
Append '_r' for reverse.
edges : bool, default: True, optional
Specifies whether to draw first and last edges of the histogram
binticks : bool, default: False, optional
Attempts to draw x-axis ticks coinciding with bin boundaries if feasible.
ax : matplotlib.axes.Axes, optional
Axes object (if None, last one is fetched or one is created)
flow : str, optional { "show", "sum", "hint", "none"}
Whether plot the under/overflow bin. If "show", add additional under/overflow bin. If "sum", add the under/overflow bin content to first/last bin.
**kwargs :
Keyword arguments passed to underlying matplotlib functions -
{'step', 'fill_between', 'errorbar'}.
Returns
-------
List[Hist1DArtists]
"""
# ax check
if ax is None:
ax = plt.gca()
else:
if not isinstance(ax, plt.Axes):
raise ValueError("ax must be a matplotlib Axes object")
# arg check
_allowed_histtype = ["fill", "step", "errorbar"]
_err_message = f"Select 'histtype' from: {_allowed_histtype}"
assert histtype in _allowed_histtype, _err_message
assert flow is None or flow in {
"show",
"sum",
"hint",
"none",
}, "flow must be show, sum, hint, or none"
# Convert 1/0 etc to real bools
stack = bool(stack)
density = bool(density)
edges = bool(edges)
binticks = bool(binticks)
# Process input
hists = list(process_histogram_parts(H, bins))
final_bins, xtick_labels = get_plottable_protocol_bins(hists[0].axes[0])
_bin_widths = np.diff(final_bins)
_bin_centers = final_bins[1:] - _bin_widths / float(2)
assert final_bins.ndim == 1, "bins need to be 1 dimensional"
_x_axes_label = ax.get_xlabel()
x_axes_label = (
_x_axes_label
if _x_axes_label != ""
else get_histogram_axes_title(hists[0].axes[0])
)
# Show under/overflow bins
# check underflow/overflow bin exist
underflow, overflow = 0.0, 0.0
for h in hists:
if (
hasattr(h, "values")
and "flow" not in inspect.getfullargspec(h.values).args
and flow is not None
):
continue
elif flow is None:
continue
elif (
hasattr(h, "axes")
and hasattr(h.axes[0], "traits")
and hasattr(h.axes[0].traits, "underflow")
):
if h.axes[0].traits.underflow:
underflow = underflow + h.values(flow=True)[0]
if h.axes[0].traits.overflow:
overflow = overflow + h.values(flow=True)[-1]
else:
underflow = underflow + h.values(flow=True)[0]
overflow = overflow + h.values(flow=True)[-1]
# "show": Add additional bin with 5 times bin width
plottables = []
flow_bins = final_bins
for i, h in enumerate(hists):
value, variance = h.values(), h.variances()
if hasattr(h, "values") and "flow" not in inspect.getfullargspec(h.values).args:
if flow == "sum" or flow == "show":
warnings.warn(
f"{type(h)} is not allowed to get flow bins", stacklevel=2
)
plottables.append(Plottable(value, edges=final_bins, variances=variance))
# check if the original hist has flow bins
elif (
hasattr(h, "axes")
and hasattr(h.axes[0], "traits")
and hasattr(h.axes[0].traits, "underflow")
and not h.axes[0].traits.underflow
and not h.axes[0].traits.overflow
and flow in {"show", "sum"}
):
warnings.warn(f"You don't have flow bins stored in {h!r}", stacklevel=2)
plottables.append(Plottable(value, edges=final_bins, variances=variance))
elif flow == "hint":
plottables.append(Plottable(value, edges=final_bins, variances=variance))
elif flow == "show":
if underflow > 0:
if i == 0:
flow_bins = np.insert(
final_bins,
0,
[
final_bins[0] - _bin_widths[0] * len(_bin_widths) * 0.08,
final_bins[0] - _bin_widths[0] * len(_bin_widths) * 0.03,
],
)
value, variance = np.insert(value, 0, np.nan), np.insert(
variance, 0, np.nan
)
value, variance = np.insert(
value, 0, h.values(flow=True)[0]
                ), np.insert(variance, 0, h.variances(flow=True)[0])
if overflow > 0:
if i == 0:
flow_bins = np.append(
flow_bins,
[
final_bins[-1] + _bin_widths[-1] * len(_bin_widths) * 0.03,
final_bins[-1] + _bin_widths[-1] * len(_bin_widths) * 0.08,
],
)
value, variance = np.append(value, np.nan), np.append(variance, np.nan)
value, variance = np.append(value, h.values(flow=True)[-1]), np.append(
variance, h.variances(flow=True)[-1]
)
plottables.append(Plottable(value, edges=flow_bins, variances=variance))
elif flow == "sum":
value, variance = h.values().copy(), h.variances().copy()
value[0], value[-1] = (
value[0] + h.values(flow=True)[0],
value[-1] + h.values(flow=True)[-1],
)
variance[0], variance[-1] = (
variance[0] + h.variances(flow=True)[0],
variance[-1] + h.variances(flow=True)[-1],
)
plottables.append(Plottable(value, edges=final_bins, variances=variance))
else:
plottables.append(Plottable(value, edges=final_bins, variances=variance))
if w2 is not None:
for _w2, _plottable in zip(
w2.reshape(len(plottables), len(final_bins) - 1), plottables
):
_plottable.variances = _w2
_plottable.method = w2method
if w2 is not None and yerr is not None:
raise ValueError("Can only supply errors or w2")
_labels: list[str | None]
if label is None:
_labels = [None] * len(plottables)
elif isinstance(label, str):
_labels = [label] * len(plottables)
elif not np.iterable(label):
_labels = [str(label)] * len(plottables)
else:
_labels = [str(lab) for lab in label]
def iterable_not_string(arg):
return isinstance(arg, collections.abc.Iterable) and not isinstance(arg, str)
_chunked_kwargs: list[dict[str, Any]] = []
for _ in range(len(plottables)):
_chunked_kwargs.append({})
for kwarg in kwargs:
# Check if iterable
if iterable_not_string(kwargs[kwarg]):
# Check if tuple (can be used for colors)
if isinstance(kwargs[kwarg], tuple):
for i in range(len(_chunked_kwargs)):
_chunked_kwargs[i][kwarg] = kwargs[kwarg]
else:
for i, kw in enumerate(kwargs[kwarg]):
_chunked_kwargs[i][kwarg] = kw
else:
for i in range(len(_chunked_kwargs)):
_chunked_kwargs[i][kwarg] = kwargs[kwarg]
############################
# # yerr calculation
_yerr: np.ndarray | None
if yerr is not None:
# yerr is array
if hasattr(yerr, "__len__"):
_yerr = np.asarray(yerr)
# yerr is a number
elif isinstance(yerr, (int, float)) and not isinstance(yerr, bool):
_yerr = np.ones((len(plottables), len(final_bins) - 1)) * yerr
# yerr is automatic
else:
_yerr = None
else:
_yerr = None
if _yerr is not None:
assert isinstance(_yerr, np.ndarray)
if _yerr.ndim == 3:
# Already correct format
pass
elif _yerr.ndim == 2 and len(plottables) == 1:
# Broadcast ndim 2 to ndim 3
if _yerr.shape[-2] == 2: # [[1,1], [1,1]]
_yerr = _yerr.reshape(len(plottables), 2, _yerr.shape[-1])
elif _yerr.shape[-2] == 1: # [[1,1]]
_yerr = np.tile(_yerr, 2).reshape(len(plottables), 2, _yerr.shape[-1])
else:
raise ValueError("yerr format is not understood")
elif _yerr.ndim == 2:
# Broadcast yerr (nh, N) to (nh, 2, N)
_yerr = np.tile(_yerr, 2).reshape(len(plottables), 2, _yerr.shape[-1])
elif _yerr.ndim == 1:
# Broadcast yerr (1, N) to (nh, 2, N)
_yerr = np.tile(_yerr, 2 * len(plottables)).reshape(
len(plottables), 2, _yerr.shape[-1]
)
else:
raise ValueError("yerr format is not understood")
assert _yerr is not None
for yrs, _plottable in zip(_yerr, plottables):
_plottable.fixed_errors(*yrs)
# Sorting
if sort is not None:
if isinstance(sort, str):
if sort.split("_")[0] in ["l", "label"] and isinstance(_labels, list):
order = np.argsort(label) # [::-1]
elif sort.split("_")[0] in ["y", "yield"]:
_yields = [np.sum(_h.values) for _h in plottables]
order = np.argsort(_yields)
if len(sort.split("_")) == 2 and sort.split("_")[1] == "r":
order = order[::-1]
elif isinstance(sort, list) or isinstance(sort, np.ndarray):
if len(sort) != len(plottables):
raise ValueError(
f"Sort indexing array is of the wrong size - {len(sort)}, {len(plottables)} expected."
)
order = np.asarray(sort)
else:
raise ValueError(f"Sort type: {sort} not understood.")
plottables = [plottables[ix] for ix in order]
_chunked_kwargs = [_chunked_kwargs[ix] for ix in order]
_labels = [_labels[ix] for ix in order]
# ############################
# # Stacking, norming, density
if density is True and binwnorm is not None:
raise ValueError("Can only set density or binwnorm.")
if density is True:
if stack:
_total = np.sum(
np.array([plottable.values for plottable in plottables]), axis=0
)
for plottable in plottables:
plottable.flat_scale(1.0 / np.sum(np.diff(final_bins) * _total))
else:
for plottable in plottables:
plottable.density = True
elif binwnorm is not None:
for plottable, norm in zip(
plottables, np.broadcast_to(binwnorm, (len(plottables),))
):
plottable.flat_scale(norm / np.diff(final_bins))
# Stack
if stack and len(plottables) > 1:
from .utils import stack as stack_fun
plottables = stack_fun(*plottables)
##########
# Plotting
return_artists: list[StairsArtists | ErrorBarArtists] = []
if histtype == "step":
for i in range(len(plottables)):
do_errors = yerr is not False and (
(yerr is not None or w2 is not None)
or (plottables[i].variances is not None)
)
_kwargs = _chunked_kwargs[i]
_label = _labels[i] if do_errors else None
_step_label = _labels[i] if not do_errors else None
_kwargs = soft_update_kwargs(_kwargs, {"linewidth": 1.5})
_plot_info = plottables[i].to_stairs()
_plot_info["baseline"] = None if not edges else 0
_s = ax.stairs(
**_plot_info,
label=_step_label,
**_kwargs,
)
if do_errors:
_kwargs = soft_update_kwargs(_kwargs, {"color": _s.get_edgecolor()})
_kwargs["linestyle"] = "none"
_plot_info = plottables[i].to_errorbar()
_e = ax.errorbar(
**_plot_info,
**_kwargs,
)
_e_leg = ax.errorbar(
[], [], yerr=1, xerr=1, color=_s.get_edgecolor(), label=_label
)
return_artists.append(
StairsArtists(
_s,
_e if do_errors else None,
_e_leg if do_errors else None,
)
)
_artist = _s
elif histtype == "fill":
for i in range(len(plottables)):
_kwargs = _chunked_kwargs[i]
_f = ax.stairs(
**plottables[i].to_stairs(), label=_labels[i], fill=True, **_kwargs
)
return_artists.append(StairsArtists(_f, None, None))
_artist = _f
elif histtype == "errorbar":
err_defaults = {
"linestyle": "none",
"marker": ".",
"markersize": 10.0,
"elinewidth": 1,
}
_xerr: np.ndarray | float | int | None
if xerr is True:
_xerr = _bin_widths / 2
elif isinstance(xerr, (int, float)) and not isinstance(xerr, bool):
_xerr = xerr
else:
_xerr = None
for i in range(len(plottables)):
_plot_info = plottables[i].to_errorbar()
if yerr is False:
_plot_info["yerr"] = None
_plot_info["xerr"] = _xerr
_e = ax.errorbar(
**_plot_info,
label=_labels[i],
**soft_update_kwargs(_chunked_kwargs[i], err_defaults),
)
return_artists.append(ErrorBarArtists(_e))
_artist = _e[0]
# Add sticky edges for autoscale
_artist.sticky_edges.y.append(0)
if xtick_labels is None:
if binticks:
_slice = int(round(float(len(final_bins)) / len(ax.get_xticks()))) + 1
ax.set_xticks(final_bins[::_slice])
elif flow == "show":
if binticks:
_slice = int(round(float(len(final_bins)) / len(ax.get_xticks()))) + 1
ax.set_xticks(final_bins[::_slice])
else:
ax.set_xticks(_bin_centers)
ax.set_xticklabels(xtick_labels)
if x_axes_label:
ax.set_xlabel(x_axes_label)
if flow in {"hint", "show"} and (underflow > 0.0 or overflow > 0.0):
d = 0.9 # proportion of vertical to horizontal extent of the slanted line
trans = mpl.transforms.blended_transform_factory(ax.transData, ax.transAxes)
ax_h = ax.bbox.height
kwargs = dict(
marker=[(-0.5, -d), (0.5, d)],
markersize=ax_h * 0.05,
linestyle="none",
color="k",
mec="k",
mew=1,
clip_on=False,
transform=trans,
)
xticks = ax.get_xticks().tolist()
if underflow > 0.0:
if flow == "hint":
ax.plot(
[
final_bins[0] - _bin_widths[0] * len(_bin_widths) * 0.03,
final_bins[0],
],
[0, 0],
**kwargs,
)
if flow == "show":
ax.plot(
[flow_bins[1], flow_bins[2]],
[0, 0],
**kwargs,
)
xticks[0] = ""
xticks[1] = f"<{flow_bins[2]}"
ax.set_xticklabels(xticks)
if overflow > 0.0:
if flow == "hint":
ax.plot(
[
final_bins[-1],
final_bins[-1] + _bin_widths[-1] * len(_bin_widths) * 0.03,
],
[0, 0],
**kwargs,
)
if flow == "show":
ax.plot(
[flow_bins[-3], flow_bins[-2]],
[0, 0],
**kwargs,
)
xticks[-1] = ""
xticks[-2] = f">{flow_bins[-3]}"
ax.set_xticklabels(xticks)
return return_artists
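# Illustrative usage sketch for histplot (not part of the library code; the data
# below is made up):
#
#     import numpy as np
#     import mplhep as hep
#
#     h, bins = np.histogram(np.random.normal(size=1000), bins=30)
#     hep.histplot((h, bins), yerr=True, histtype="errorbar", label="toy data")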
def hist2dplot(
H,
xbins=None,
ybins=None,
labels=None,
cbar=True,
cbarsize="7%",
cbarpad=0.2,
cbarpos="right",
cbarextend=False,
cmin=None,
cmax=None,
ax=None,
flow="hint",
**kwargs,
):
"""
Create a 2D histogram plot from `np.histogram`-like inputs.
Parameters
----------
H : object
Histogram object with containing values and optionally bins. Can be:
- `np.histogram` tuple
- `boost_histogram` histogram object
- raw histogram values as list of list or 2d-array
xbins : 1D array-like, optional, default None
Histogram bins along x axis, if not part of ``H``.
ybins : 1D array-like, optional, default None
Histogram bins along y axis, if not part of ``H``.
labels : 2D array (H-like) or bool, default None, optional
Array of per-bin labels to display. If ``True`` will
display numerical values
cbar : bool, optional, default True
Draw a colorbar. In contrast to mpl behaviors the cbar axes is
appended in such a way that it doesn't modify the original axes
width:height ratio.
cbarsize : str or float, optional, default "7%"
Colorbar width.
cbarpad : float, optional, default 0.2
Colorbar distance from main axis.
cbarpos : {'right', 'left', 'bottom', 'top'}, optional, default "right"
Colorbar position w.r.t main axis.
cbarextend : bool, optional, default False
Extends figure size to keep original axes size same as without cbar.
Only safe for 1 axes per fig.
cmin : float, optional
Colorbar minimum.
cmax : float, optional
Colorbar maximum.
ax : matplotlib.axes.Axes, optional
Axes object (if None, last one is fetched or one is created)
flow : str, optional {"show", "sum","hint", None}
        Whether to plot the under/overflow bins. If "show", add an additional under/overflow bin. If "sum", add the under/overflow bin content to the first/last bin. "hint" highlights the bins that have under/overflow contents.
**kwargs :
Keyword arguments passed to underlying matplotlib function - pcolormesh.
Returns
-------
Hist2DArtist
"""
# ax check
if ax is None:
ax = plt.gca()
else:
if not isinstance(ax, plt.Axes):
raise ValueError("ax must be a matplotlib Axes object")
h = hist_object_handler(H, xbins, ybins)
# TODO: use Histogram everywhere
H = h.values()
xbins, xtick_labels = get_plottable_protocol_bins(h.axes[0])
ybins, ytick_labels = get_plottable_protocol_bins(h.axes[1])
# Show under/overflow bins
# "show": Add additional bin with 2 times bin width
if (
hasattr(h, "values")
and "flow" not in inspect.getfullargspec(h.values).args
and flow is not None
):
print(
f"Warning: {type(h)} is not allowed to get flow bins, flow bin option set to None"
)
flow = None
elif (
hasattr(h, "axes")
and hasattr(h.axes[0], "traits")
and hasattr(h.axes[0].traits, "underflow")
and not h.axes[0].traits.underflow
and not h.axes[0].traits.overflow
):
flow = None
print(f"Warning: you don't have flow bins stored in {h}")
elif flow == "show":
H = h.values(flow=True)
if any(h.values(flow=True)[0] > 0):
xbins = np.array(
[
xbins[0] - (xbins[-1] - xbins[0]) * 0.08,
xbins[0] - (xbins[-1] - xbins[0]) * 0.03,
*xbins,
]
)
if any(h.values(flow=True)[-1] > 0):
xbins = np.array(
[
*xbins,
xbins[-1] + (xbins[-1] - xbins[0]) * 0.03,
xbins[-1] + (xbins[-1] - xbins[0]) * 0.08,
]
)
if any(h.values(flow=True)[:, 0] > 0):
ybins = np.array(
[
ybins[0] - (ybins[-1] - ybins[0]) * 0.08,
ybins[0] - (ybins[-1] - ybins[0]) * 0.03,
*ybins,
]
)
if any(h.values(flow=True)[:, -1] > 0):
ybins = np.array(
[
*ybins,
ybins[-1] + (ybins[-1] - ybins[0]) * 0.03,
ybins[-1] + (ybins[-1] - ybins[0]) * 0.08,
]
)
if any(h.values(flow=True)[0] > 0.0):
H = np.insert(H, (1), np.nan, axis=-1)
if any(h.values(flow=True)[-1] > 0.0):
H = np.insert(H, (-1), np.nan, axis=-1)
if any(h.values(flow=True)[:, 0] > 0):
H = np.insert(H, (1), np.full(np.shape(H)[1], np.nan), axis=0)
if any(h.values(flow=True)[:, -1] > 0):
H = np.insert(H, (-1), np.full(np.shape(H)[1], np.nan), axis=0)
elif flow == "sum":
H = h.values().copy()
# Sum borders
H[0], H[-1] = (
H[0] + h.values(flow=True)[0, 1:-1],
H[-1] + h.values(flow=True)[-1, 1:-1],
)
H[:, 0], H[:, -1] = (
H[:, 0] + h.values(flow=True)[1:-1, 0],
H[:, -1] + h.values(flow=True)[1:-1, -1],
)
# Sum corners to corners
H[0, 0], H[-1, -1], H[0, -1], H[-1, 0] = (
h.values(flow=True)[0, 0] + H[0, 0],
h.values(flow=True)[-1, -1] + H[-1, -1],
h.values(flow=True)[0, -1] + H[0, -1],
h.values(flow=True)[-1, 0] + H[-1, 0],
)
xbin_centers = xbins[1:] - np.diff(xbins) / float(2)
ybin_centers = ybins[1:] - np.diff(ybins) / float(2)
_x_axes_label = ax.get_xlabel()
x_axes_label = (
_x_axes_label if _x_axes_label != "" else get_histogram_axes_title(h.axes[0])
)
_y_axes_label = ax.get_ylabel()
y_axes_label = (
_y_axes_label if _y_axes_label != "" else get_histogram_axes_title(h.axes[1])
)
H = H.T
if cmin is not None:
H[H < cmin] = None
if cmax is not None:
H[H > cmax] = None
X, Y = np.meshgrid(xbins, ybins)
kwargs.setdefault("shading", "flat")
pc = ax.pcolormesh(X, Y, H, vmin=cmin, vmax=cmax, **kwargs)
if x_axes_label:
ax.set_xlabel(x_axes_label)
if y_axes_label:
ax.set_ylabel(y_axes_label)
ax.set_xlim(xbins[0], xbins[-1])
ax.set_ylim(ybins[0], ybins[-1])
if xtick_labels is None: # Ordered axis
if len(ax.get_xticks()) > len(xbins) * 0.7:
ax.set_xticks(xbins)
else: # Categorical axis
ax.set_xticks(xbin_centers)
ax.set_xticklabels(xtick_labels)
if ytick_labels is None:
if len(ax.get_yticks()) > len(ybins) * 0.7:
ax.set_yticks(ybins)
else: # Categorical axis
ax.set_yticks(ybin_centers)
ax.set_yticklabels(ytick_labels)
if cbar:
cax = append_axes(
ax, size=cbarsize, pad=cbarpad, position=cbarpos, extend=cbarextend
)
cb_obj = plt.colorbar(pc, cax=cax)
else:
cb_obj = None
plt.sca(ax)
if flow == "hint" or flow == "show":
d = 0.9 # proportion of vertical to horizontal extent of the slanted line
trans = mpl.transforms.blended_transform_factory(ax.transData, ax.transAxes)
ax_h = ax.bbox.height
kwargs = dict(
marker=[(-0.5, -d), (0.5, d)],
markersize=ax_h * 0.05,
linestyle="none",
color="k",
mec="k",
mew=1,
clip_on=False,
)
if any(h.values(flow=True)[0] > 0):
if flow == "hint":
ax.plot(
[
xbins[0] - np.diff(xbins)[0] * len(np.diff(xbins)) * 0.03,
xbins[0],
],
[0, 0],
transform=trans,
**kwargs,
)
if flow == "show":
ax.plot([xbins[1], xbins[2]], [0, 0], transform=trans, **kwargs)
ax.plot([xbins[0], xbins[0]], [ybins[1], ybins[2]], **kwargs)
if any(h.values(flow=True)[:, 0] > 0):
if flow == "hint":
ax.plot(
[
xbins[-1] + np.diff(xbins)[-1] * len(np.diff(xbins)) * 0.03,
xbins[-1],
],
[0, 0],
transform=trans,
**kwargs,
)
if flow == "show":
ax.plot([xbins[-3], xbins[-2]], [0, 0], transform=trans, **kwargs)
ax.plot([xbins[-1], xbins[-1]], [ybins[1], ybins[2]], **kwargs)
if any(h.values(flow=True)[-1] > 0):
if flow == "hint":
ax.plot(
[
xbins[0],
xbins[0] - np.diff(xbins)[0] * len(np.diff(xbins)) * 0.03,
],
[1, 1],
transform=trans,
**kwargs,
)
if flow == "show":
ax.plot([xbins[1], xbins[2]], [1, 1], transform=trans, **kwargs)
ax.plot([xbins[0], xbins[0]], [ybins[-3], ybins[-2]], **kwargs)
if any(h.values(flow=True)[:, -1] > 0):
if flow == "hint":
ax.plot(
[
xbins[-1] + np.diff(xbins)[-1] * len(np.diff(xbins)) * 0.03,
xbins[-1],
],
[1, 1],
transform=trans,
**kwargs,
)
if flow == "show":
ax.plot([xbins[-3], xbins[-2]], [1, 1], transform=trans, **kwargs)
ax.plot([xbins[-1], xbins[-1]], [ybins[-3], ybins[-2]], **kwargs)
_labels: np.ndarray | None = None
if isinstance(labels, bool):
_labels = H if labels else None
elif np.iterable(labels):
label_array = np.asarray(labels).T
if H.shape == label_array.shape:
_labels = label_array
else:
raise ValueError(
f"Labels input has incorrect shape (expect: {H.shape}, got: {label_array.shape})"
)
elif labels is not None:
raise ValueError(
"Labels not understood, either specify a bool or a Hist-like array"
)
text_artists = []
if _labels is not None:
for ix, xc in enumerate(xbin_centers):
for iy, yc in enumerate(ybin_centers):
color = (
"black"
if isLight(pc.cmap(pc.norm(H[iy, ix]))[:-1])
else "lightgrey"
)
text_artists.append(
ax.text(
xc, yc, _labels[iy, ix], ha="center", va="center", color=color
)
)
return ColormeshArtists(pc, cb_obj, text_artists)
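# Illustrative usage sketch for hist2dplot (not part of the library code; the
# data below is made up):
#
#     import numpy as np
#     import mplhep as hep
#
#     x = np.random.normal(size=1000)
#     y = np.random.normal(size=1000)
#     hep.hist2dplot(*np.histogram2d(x, y, bins=25), labels=False)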
#############################################
# Utils
def overlap(ax, bbox, get_vertices=False):
"""
    Find overlap of a bbox with the elements drawn on an axes.
"""
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle
# From
# https://github.com/matplotlib/matplotlib/blob/08008d5cb4d1f27692e9aead9a76396adc8f0b19/lib/matplotlib/legend.py#L845
lines = []
bboxes = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
lines.append(path)
for handle in ax.collections:
for path in handle.get_paths():
lines.append(path.interpolated(20))
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
# TODO Possibly other objects
vertices = np.concatenate([line.vertices for line in lines])
tvertices = [ax.transData.transform(v) for v in vertices]
overlap = bbox.count_contains(tvertices) + bbox.count_overlaps(bboxes)
if get_vertices:
return overlap, vertices
else:
return overlap
def _draw_leg_bbox(ax):
"""
    Draw the legend() and fetch its bbox.
"""
fig = ax.figure
leg = ax.get_legend()
fig.canvas.draw()
return leg.get_frame().get_bbox()
def _draw_text_bbox(ax):
"""
    Draw the AnchoredText boxes and fetch their bbox.
"""
fig = ax.figure
textboxes = [k for k in ax.get_children() if isinstance(k, AnchoredText)]
if len(textboxes) > 1:
print("Warning: More than one textbox found")
for box in textboxes:
if box.loc in [1, 2]:
bbox = box.get_tightbbox(fig.canvas.renderer)
else:
bbox = textboxes[0].get_tightbbox(fig.canvas.renderer)
return bbox
def yscale_legend(ax=None):
"""
Automatically scale y-axis up to fit in legend()
"""
if ax is None:
ax = plt.gca()
scale_factor = 10 ** (1.05) if ax.get_yscale() == "log" else 1.05
while overlap(ax, _draw_leg_bbox(ax)) > 0:
ax.set_ylim(ax.get_ylim()[0], ax.get_ylim()[-1] * scale_factor)
ax.figure.canvas.draw()
return ax
def yscale_text(ax=None):
"""
Automatically scale y-axis up to fit AnchoredText
"""
if ax is None:
ax = plt.gca()
while overlap(ax, _draw_text_bbox(ax)) > 0:
ax.set_ylim(ax.get_ylim()[0], ax.get_ylim()[-1] * 1.1)
ax.figure.canvas.draw()
return ax
def ylow(ax=None, ylow=None):
"""
    Set the lower y limit to 0 if no data/errors go lower,
    or set a specific value.
"""
if ax is None:
ax = plt.gca()
if ax.get_yaxis().get_scale() == "log":
return ax
if ylow is None:
# Check full figsize below 0
bbox = Bbox.from_bounds(
0, 0, ax.get_window_extent().width, -ax.get_window_extent().height
)
if overlap(ax, bbox) == 0:
ax.set_ylim(0, None)
else:
ydata = overlap(ax, bbox, get_vertices=True)[1][:, 1]
ax.set_ylim(np.min([np.min(ydata), ax.get_ylim()[0]]), None)
else:
ax.set_ylim(0, ax.get_ylim()[-1])
return ax
def mpl_magic(ax=None, info=True):
"""
Consolidate all ex-post style adjustments:
ylow
yscale_legend
"""
if ax is None:
ax = plt.gca()
    if info:
print("Running ROOT/CMS style adjustments (hide with info=False):")
ax = ylow(ax)
ax = yscale_legend(ax)
ax = yscale_text(ax)
return ax
########################################
# Figure/axes helpers
def rescale_to_axessize(ax, w, h):
"""
Adjust figure size to axes size in inches
Parameters: w, h: width, height in inches
"""
if not ax:
ax = plt.gca()
left = ax.figure.subplotpars.left
r = ax.figure.subplotpars.right
t = ax.figure.subplotpars.top
b = ax.figure.subplotpars.bottom
figw = float(w) / (r - left)
figh = float(h) / (t - b)
ax.figure.set_size_inches(figw, figh)
def box_aspect(ax, aspect=1):
"""
    Adjust the axes position box to a fixed aspect ratio.
Parameters: aspect: float, optional aspect ratio
"""
position = ax.get_position()
fig_width, fig_height = ax.get_figure().get_size_inches()
fig_aspect = fig_height / fig_width
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(aspect, pb, fig_aspect)
ax.set_position(pb1)
class RemainderFixed(axes_size.Scaled):
def __init__(self, xsizes, ysizes, divider):
self.xsizes = xsizes
self.ysizes = ysizes
self.div = divider
def get_size(self, renderer):
xrel, xabs = axes_size.AddList(self.xsizes).get_size(renderer)
yrel, yabs = axes_size.AddList(self.ysizes).get_size(renderer)
bb = Bbox.from_bounds(*self.div.get_position()).transformed(
self.div._fig.transFigure
)
w = bb.width / self.div._fig.dpi - xabs
h = bb.height / self.div._fig.dpi - yabs
return 0, min([w, h])
def make_square_add_cbar(ax, size=0.4, pad=0.1):
"""
Make input axes square and return an appended axes to the right for
a colorbar. Both axes resize together to fit figure automatically.
Works with tight_layout().
"""
divider = make_axes_locatable(ax)
margin_size = axes_size.Fixed(size)
pad_size = axes_size.Fixed(pad)
xsizes = [pad_size, margin_size]
ysizes = xsizes
cax = divider.append_axes("right", size=margin_size, pad=pad_size)
divider.set_horizontal([RemainderFixed(xsizes, ysizes, divider)] + xsizes)
divider.set_vertical([RemainderFixed(xsizes, ysizes, divider)] + ysizes)
return cax
def append_axes(ax, size=0.1, pad=0.1, position="right", extend=False):
"""
Append a side ax to the current figure and return it.
Figure is automatically extended along the direction of the added axes to
accommodate it. Unfortunately can not be reliably chained.
"""
fig = ax.figure
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
def convert(fraction, position=position):
if isinstance(fraction, str) and fraction.endswith("%"):
if position in ["right", "left"]:
fraction = width * float(fraction.strip("%")) / 100
elif position in ["top", "bottom"]:
fraction = height * float(fraction.strip("%")) / 100
return fraction
size = convert(size)
pad = convert(pad)
divider = make_axes_locatable(ax)
margin_size = axes_size.Fixed(size)
pad_size = axes_size.Fixed(pad)
xsizes = [pad_size, margin_size]
if position in ["top", "bottom"]:
xsizes = xsizes[::-1]
yhax = divider.append_axes(position, size=margin_size, pad=pad_size)
if extend:
def extend_ratio(ax, yhax):
ax.figure.canvas.draw()
orig_size = ax.get_position().size
new_size = sum(itax.get_position().size for itax in [ax, yhax])
return new_size / orig_size
if position in ["right"]:
divider.set_horizontal([axes_size.Fixed(width)] + xsizes)
fig.set_size_inches(
fig.get_size_inches()[0] * extend_ratio(ax, yhax)[0],
fig.get_size_inches()[1],
)
elif position in ["left"]:
divider.set_horizontal(xsizes[::-1] + [axes_size.Fixed(width)])
fig.set_size_inches(
fig.get_size_inches()[0] * extend_ratio(ax, yhax)[0],
fig.get_size_inches()[1],
)
elif position in ["top"]:
divider.set_vertical([axes_size.Fixed(height)] + xsizes[::-1])
fig.set_size_inches(
fig.get_size_inches()[0],
fig.get_size_inches()[1] * extend_ratio(ax, yhax)[1],
)
ax.get_shared_x_axes().join(ax, yhax)
elif position in ["bottom"]:
divider.set_vertical(xsizes + [axes_size.Fixed(height)])
fig.set_size_inches(
fig.get_size_inches()[0],
fig.get_size_inches()[1] * extend_ratio(ax, yhax)[1],
)
ax.get_shared_x_axes().join(ax, yhax)
return yhax
####################
# Legend Helpers
def hist_legend(ax=None, **kwargs):
from matplotlib.lines import Line2D
if ax is None:
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
new_handles = [
Line2D([], [], c=h.get_edgecolor()) if isinstance(h, mpl.patches.Polygon) else h
for h in handles
]
ax.legend(handles=new_handles[::-1], labels=labels[::-1], **kwargs)
return ax
def sort_legend(ax, order=None):
"""
ax : axes with legend labels in it
order : Ordered dict with renames or array with order
"""
handles, labels = ax.get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
if isinstance(order, OrderedDict):
ordered_label_list = list(order.keys())
elif isinstance(order, (list, tuple, np.ndarray)):
ordered_label_list = list(order)
elif order is None:
ordered_label_list = labels
else:
raise TypeError(f"Unexpected values type of order: {type(order)}")
ordered_label_list = [entry for entry in ordered_label_list if entry in labels]
ordered_label_values = [by_label[k] for k in ordered_label_list]
if isinstance(order, OrderedDict):
ordered_label_list = [order[k] for k in ordered_label_list]
return ordered_label_values, ordered_label_list
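# Example usage (a sketch; assumes artists were plotted with `label=` so the
# axes already has legend entries):
#
#   handles, labels = sort_legend(ax, order=["signal", "background"])
#   ax.legend(handles, labels)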
|
39c404b433c5279b07d083abc1f5f0e47d0a1d96
|
0744dcc5394cebf57ebcba343747af6871b67017
|
/tools/ttrace_parser/scripts/ttrace.py
|
1ae01ab5bfd9e733a2b74b4d7a23488a68543e6b
|
[
"Apache-2.0"
] |
permissive
|
Samsung/TizenRT
|
96abf62f1853f61fcf91ff14671a5e0c6ca48fdb
|
1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686
|
refs/heads/master
| 2023-08-31T08:59:33.327998
| 2023-08-08T06:09:20
| 2023-08-31T04:38:20
| 82,517,252
| 590
| 719
|
Apache-2.0
| 2023-09-14T06:54:49
| 2017-02-20T04:38:30
|
C
|
UTF-8
|
Python
| false
| false
| 19,521
|
py
|
ttrace.py
|
#!/usr/bin/env python
###########################################################################
#
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
from __future__ import print_function
import os
import sys
import time
import zlib
import errno
import select
import optparse
import pid_parser
import subprocess
flattened_css_file = 'style.css'
flattened_js_file = 'tscript.js'
g_device_serial = None
class OptionParserIgnoreErrors(optparse.OptionParser):
def error(self, msg):
pass
def exit(self):
pass
def print_usage(self):
pass
def print_help(self):
pass
def print_version(self):
pass
def compose_html_win(script_dir, options, css, js, templates):
data = []
ret_fd = os.open(options.from_file_win, os.O_RDONLY | os.O_BINARY)
out = os.read(ret_fd, 4096)
parts = out.split('TRACE:', 1)
data.append(parts[1])
while True:
out = os.read(ret_fd, 4096)
keepReading = False
if len(out) > 0:
keepReading = True
data.append(out)
if not keepReading:
break
data = ''.join(data)
if data.startswith('\r\n'):
data = data.replace('\r\n', '\n')
data = data[1:]
html_filename = options.output_file
html_prefix = read_asset(script_dir, 'prefix.html')
html_suffix = read_asset(script_dir, 'suffix.html')
html_file = open(html_filename, 'w')
html_file.write(html_prefix % (css, js, templates))
size = 4096
dec = zlib.decompressobj()
for chunk in (data[i:i + size] for i in range(0, len(data), size)):
decoded_chunk = dec.decompress(chunk)
html_chunk = decoded_chunk.replace('\n', '\\n\\\n')
html_file.write(html_chunk)
html_out = dec.flush().replace('\n', '\\n\\\n')
# write body
html_file.write(html_out)
# write suffix
html_file.write(html_suffix)
html_file.close()
print("\n wrote file://%s\n" % os.path.abspath(options.output_file))
return
def compose_html(script_dir, options, css, js, templates):
html_filename = options.output_file
html_prefix = read_asset(script_dir, 'prefix.html')
html_suffix = read_asset(script_dir, 'suffix.html')
html_file = open(html_filename, 'w')
html_file.write(html_prefix % (css, js, templates))
cur_dir = os.getcwd()
    # strip status lines that are not part of the trace data
with open(os.path.join(cur_dir, options.from_text_file), "r") as input:
with open(os.path.join(cur_dir, options.from_text_file + 'composing'), "wb") as output:
for line in input:
if "capturing trace" in line:
continue
elif "TRACE:" in line:
continue
elif " done" in line:
continue
elif '\n' == line:
continue
else:
output.write(line)
# case not compressed, boot case
html_out = read_asset(script_dir, os.path.join(cur_dir, options.from_text_file + 'composing'))
html_out = html_out.replace('\n', '\\n\\\n')
os.remove(os.path.join(cur_dir, options.from_text_file + 'composing'))
# write body
html_file.write(html_out)
# Write suffix
html_file.write(html_suffix)
html_file.close()
print("\n wrote file://%s\n" % os.path.abspath(options.output_file))
return
def get_os_cmd(cmdARGS):
fd_popen = subprocess.Popen(cmdARGS.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ready = select.select([fd_popen.stdout, fd_popen.stderr], [], [fd_popen.stdout, fd_popen.stderr])
if fd_popen.stdout in ready[0]:
out = os.read(fd_popen.stdout.fileno(), 4096)
return out
else:
return 0
def sdb_shell(str_param):
cmd_str = ['sdb']
if g_device_serial:
cmd_str.extend(['-s', str(g_device_serial)])
cmd_str.extend([str(str_param)])
    os.system(' '.join(cmd_str))
os.system('sleep 2')
def is_sdb_available():
no = 0
max_no = 10
sdb_shell('kill-server')
while(no < max_no):
str_cmd = get_os_cmd('sdb start-server')
str_cmd = get_os_cmd('sdb devices')
os.system('sleep 2')
l_devices = str_cmd.split('\n')
if len(l_devices) > 3:
if g_device_serial is None:
print('Please specify serial with -e option')
sys.exit(1)
dev_type = str_cmd.split("List of devices attached")[-1].split()
if 'device' in dev_type:
print('Ready to connect')
return dev_type[0]
else:
no = no + 1
print('retry...' + str(no))
sdb_shell('kill-server')
if no == max_no:
print('Could not connect to SDB devices')
sys.exit(1)
def set_sdb_root():
dev_type = is_sdb_available()
if dev_type == 0:
return 0
sdb_shell('root on')
if not ('emulator' in dev_type):
sdb_shell('shell change-booting-mode.sh --update')
print('SDB was rooted!!!')
return 1
def trace_bootup(cmd):
if set_sdb_root() == 0:
return
print(cmd + ' > /etc/ttrace.conf\'')
str_cmd = cmd + ' > /etc/ttrace.conf\''
os.system(str_cmd)
os.system('sleep 2')
sdb_shell('shell sync')
sdb_shell('shell reboot')
sdb_shell('kill-server')
def add_sdb_serial(command, serial):
if serial is not None:
command.insert(1, serial)
command.insert(1, '-s')
def main():
global g_device_serial
usage = "Usage: %prog [options] [category1 [category2 ...]]"
desc = "Example: %prog -b 32768 -t 15 gfx input view sched freq"
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-o', dest='output_file', help='write HTML to FILE',
default='trace.html', metavar='FILE')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
help='use a trace buffer size of N KB', metavar='N')
parser.add_option('-l', '--list-categories', dest='list_categories', default=False,
action='store_true', help='list the available categories and exit')
parser.add_option('-u', '--bootup', dest='trace_bootup', default=False,
action='store_true', help='trace boot up')
parser.add_option('--link-assets', dest='link_assets', default=False,
action='store_true', help='link to original CSS or JS resources '
'instead of embedding them')
parser.add_option('--from-file', dest='from_file', action='store',
help='read the trace from a file (compressed) rather than running a live trace')
parser.add_option('--from-file-win', dest='from_file_win', action='store',
help='read the trace from a file (compressed) rather than running a live trace on windows')
parser.add_option('--from-text-file', dest='from_text_file', action='store',
help='read the trace from a file (not compressed) rather than running a live trace')
parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
type='string', help='')
parser.add_option('-e', '--serial', dest='device_serial', type='string',
help='sdb device serial number')
parser.add_option('--async_start', dest='async_start', default=False, action='store_true',
help='start circular trace and return immediately')
parser.add_option('--async_dump', dest='async_dump', default=False, action='store_true',
help='dump the current contents of circular trace buffer')
parser.add_option('--async_stop', dest='async_stop', default=False, action='store_true',
help='stop tracing and dump the current contents of circular trace buffer')
parser.add_option('--append', dest='append', default=False, action='store_true',
help='append traces to the existing traces. do not clear the trace buffer')
parser.add_option('--backup', dest='backup', default=False, action='store_true',
help='back up the existing traces to /tmp/trace.backup and then clear the trace buffer')
options, args = parser.parse_args()
if options.list_categories:
atrace_args = ['sdb', 'shell', 'atrace', '--list_categories']
expect_trace = False
elif options.from_file is not None:
atrace_args = ['cat', options.from_file]
expect_trace = True
elif options.from_file_win is not None:
atrace_args = ['type', options.from_file_win]
expect_trace = True
elif options.from_text_file is not None:
atrace_args = ['cat', options.from_text_file]
expect_trace = True
else:
if options.trace_bootup:
atrace_args = ['sdb', 'shell', '\'echo', 'atrace']
expect_trace = True
else:
atrace_args = ['sdb', 'shell', 'atrace', '-z']
expect_trace = True
if options.trace_time is not None:
if options.trace_time > 0:
atrace_args.extend(['-t', str(options.trace_time)])
else:
parser.error('the trace time must be a positive number')
if options.trace_buf_size is not None:
if options.trace_buf_size > 0:
atrace_args.extend(['-b', str(options.trace_buf_size)])
else:
parser.error('the trace buffer size must be a positive number')
atrace_args.extend(args)
if atrace_args[0] == 'sdb':
add_sdb_serial(atrace_args, options.device_serial)
if options.device_serial:
g_device_serial = str(options.device_serial).strip()
else:
g_device_serial = None
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
if options.link_assets:
src_dir = os.path.join(script_dir, options.asset_dir, 'src')
build_dir = os.path.join(script_dir, options.asset_dir, 'build')
js_files, js_flattenizer, css_files, templates = get_assets(src_dir, build_dir)
css = '\n'.join(linked_css_tag % (os.path.join(src_dir, f)) for f in css_files)
js = '<script language="javascript">\n%s</script>\n' % js_flattenizer
js += '\n'.join(linked_js_tag % (os.path.join(src_dir, f)) for f in js_files)
else:
css_filename = os.path.join(script_dir, flattened_css_file)
js_filename = os.path.join(script_dir, flattened_js_file)
css = compiled_css_tag % (open(css_filename).read())
js = compiled_js_tag % (open(js_filename).read())
templates = ''
html_filename = options.output_file
if options.trace_bootup:
print("Trace for bootup")
atrace_args.extend(['--async_start'])
        trace_bootup(' '.join(atrace_args))
print("Please pull out the usb cable on target")
os.system('sleep ' + '40')
print("Please plug the usb cable to target")
os.system('sleep ' + '20')
atrace_args.remove('--async_start')
atrace_args.remove('\'echo')
atrace_args.extend(['-z', '--async_stop'])
expect_trace = True
if options.from_text_file:
compose_html(script_dir, options, css, js, templates)
return
elif options.from_file_win:
compose_html_win(script_dir, options, css, js, templates)
return
elif options.from_file:
print("From file")
if options.async_start:
atrace_args.extend(['--async_start'])
if options.async_dump:
atrace_args.extend(['--async_dump'])
if options.async_stop:
atrace_args.extend(['--async_stop'])
if options.append:
atrace_args.extend(['--append'])
if options.backup:
atrace_args.extend(['--backup'])
backup_trace = True
sdb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if options.async_start:
return
result = None
data = []
# Read the text portion of the output and watch for the 'TRACE:' marker that
# indicates the start of the trace data.
while result is None:
ready = select.select([sdb.stdout, sdb.stderr], [], [sdb.stdout, sdb.stderr])
if sdb.stderr in ready[0]:
err = os.read(sdb.stderr.fileno(), 4096)
sys.stderr.write(err)
sys.stderr.flush()
if sdb.stdout in ready[0]:
out = os.read(sdb.stdout.fileno(), 4096)
parts = out.split('\nTRACE:', 1)
txt = parts[0].replace('\r', '')
if len(parts) == 2:
# The '\nTRACE:' match stole the last newline from the text, so add it
# back here.
txt += '\n'
sys.stdout.write(txt)
sys.stdout.flush()
if len(parts) == 2:
data.append(parts[1])
sys.stdout.write("downloading trace...")
sys.stdout.flush()
break
result = sdb.poll()
# Read and buffer the data portion of the output.
while True:
ready = select.select([sdb.stdout, sdb.stderr], [], [sdb.stdout, sdb.stderr])
keepReading = False
if sdb.stderr in ready[0]:
err = os.read(sdb.stderr.fileno(), 4096)
if len(err) > 0:
keepReading = True
sys.stderr.write(err)
sys.stderr.flush()
if sdb.stdout in ready[0]:
out = os.read(sdb.stdout.fileno(), 4096)
if len(out) > 0:
keepReading = True
data.append(out)
if result is not None and not keepReading:
break
result = sdb.poll()
if result == 0:
if expect_trace:
if not data:
print(('No data was captured. Output file was not ' +
'written.'), file=sys.stderr)
sys.exit(1)
else:
# Indicate to the user that the data download is complete.
print(" done\n")
data = ''.join(data)
# Collapse CRLFs that are added by sdb shell.
if data.startswith('\r\n'):
data = data.replace('\r\n', '\n')
# Skip the initial newline.
data = data[1:]
html_prefix = read_asset(script_dir, 'prefix.html')
html_suffix = read_asset(script_dir, 'suffix.html')
html_file = open(html_filename, 'w')
trace_filename = html_filename + '.trace.raw'
trace_file = open(trace_filename, 'w')
html_file.write(html_prefix % (css, js, templates))
size = 4096
dec = zlib.decompressobj()
for chunk in (data[i:i + size] for i in range(0, len(data), size)):
decoded_chunk = dec.decompress(chunk)
html_chunk = decoded_chunk.replace('\n', '\\n\\\n')
html_file.write(html_chunk)
trace_file.write(html_chunk)
html_out = dec.flush().replace('\n', '\\n\\\n')
html_file.write(html_out)
# Write suffix
html_file.write(html_suffix)
html_file.close()
trace_file.close()
pid_parser.parse(trace_filename)
os.remove(trace_filename)
print("\nwrote file://%s\n" % os.path.abspath(options.output_file))
else: # i.e. result != 0
print('sdb returned error code %d' % result, file=sys.stderr)
sys.exit(1)
def read_asset(src_dir, filename):
return open(os.path.join(src_dir, filename)).read()
def get_assets(src_dir, build_dir):
sys.path.append(build_dir)
gen = __import__('generate_standalone_timeline_view', {}, {})
parse_deps = __import__('parse_deps', {}, {})
gen_templates = __import__('generate_template_contents', {}, {})
filenames = gen._get_input_filenames()
load_sequence = parse_deps.calc_load_sequence(filenames, src_dir)
js_files = []
js_flattenizer = "window.FLATTENED = {};\n"
js_flattenizer += "window.FLATTENED_RAW_SCRIPTS = {};\n"
css_files = []
for module in load_sequence:
js_files.append(os.path.relpath(module.filename, src_dir))
js_flattenizer += "window.FLATTENED['%s'] = true;\n" % module.name
for dependent_raw_script_name in module.dependent_raw_script_names:
js_flattenizer += (
"window.FLATTENED_RAW_SCRIPTS['%s'] = true;\n"
% dependent_raw_script_name)
for style_sheet in module.style_sheets:
css_files.append(os.path.relpath(style_sheet.filename, src_dir))
templates = gen_templates.generate_templates()
sys.path.pop()
return (js_files, js_flattenizer, css_files, templates)
compiled_css_tag = """<style type="text/css">%s</style>"""
compiled_js_tag = """<script language="javascript">%s</script>"""
linked_css_tag = """<link rel="stylesheet" href="%s"></link>"""
linked_js_tag = """<script language="javascript" src="%s"></script>"""
if __name__ == '__main__':
main()
|
9111c25da1be1a266d8510dbc30ebbd27e9b1e39
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/pywin32/win32com/axdebug/contexts.pyi
|
c1c9fbfd99d3b10c31ee38a32a3facc925b2699b
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 43
|
pyi
|
contexts.pyi
|
from win32comext.axdebug.contexts import *
|
e1344434cb80ef974b5268a8f6c4bb5e017bb2e8
|
9bc2e2961f6f7c4a47e6eebc897fbf14a4e85d61
|
/studio/cli.py
|
c1c9f46ed6fd49e59634cb0fd13a57231a067274
|
[
"Apache-2.0"
] |
permissive
|
studioml/studio
|
9ee8fd12a9fa0ac09144e2520b1a3be8756ab2fa
|
e8aedf9c15baa872eb7aee4d6b28ad6208a9fca2
|
refs/heads/master
| 2023-08-16T01:54:59.184542
| 2023-01-09T20:06:01
| 2023-01-09T20:06:01
| 91,284,550
| 406
| 59
|
Apache-2.0
| 2023-09-06T17:23:10
| 2017-05-15T01:49:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,582
|
py
|
cli.py
|
import argparse
import sys
import time
from terminaltables import AsciiTable
from studio import model
from studio.util import logs
_my_logger = None
def print_help():
print('Usage: studio runs [command] arguments')
print('\ncommand can be one of the following:')
print('')
print('\tlist [username] - display experiments')
print('\tstop [experiment] - stop running experiment')
print('\tkill [experiment] - stop and delete experiment')
print('')
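# Example session (illustrative; the experiment key is a placeholder):
#
#   studio runs list
#   studio runs stop 1510000000_my_experiment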
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='configuration file', default=None)
parser.add_argument(
'--short', '-s', help='Brief output - names of experiments only',
action='store_true')
cli_args, script_args = parser.parse_known_args(sys.argv)
get_logger().setLevel(10)
if len(script_args) < 2:
get_logger().critical('No command provided!')
parser.print_help()
print_help()
return
cmd = script_args[1]
if cmd == 'list':
_list(script_args[2:], cli_args)
elif cmd == 'stop':
_stop(script_args[2:], cli_args)
elif cmd == 'kill':
_kill(script_args[2:], cli_args)
else:
get_logger().critical('Unknown command ' + cmd)
parser.print_help()
print_help()
return
def _list(args, cli_args):
with model.get_db_provider(cli_args.config) as db:
if len(args) == 0:
experiments = db.get_user_experiments()
elif args[0] == 'project':
assert len(args) == 2
experiments = db.get_project_experiments(args[1])
elif args[0] == 'users':
assert len(args) == 1
users = db.get_users()
for u in users:
print(users[u].get('email'))
return
elif args[0] == 'user':
assert len(args) == 2
users = db.get_users()
user_ids = [u for u in users if users[u].get('email') == args[1]]
assert len(user_ids) == 1, \
'The user with email ' + args[1] + \
            ' not found!'
experiments = db.get_user_experiments(user_ids[0])
elif args[0] == 'all':
assert len(args) == 1
users = db.get_users()
experiments = []
for u in users:
experiments += db.get_user_experiments(u)
else:
get_logger().critical('Unknown command ' + args[0])
return
if cli_args.short:
for e in experiments:
print(e)
return
experiments = [db.get_experiment(e) for e in experiments]
experiments.sort(key=lambda e: -e.time_added)
table = [['Time added', 'Key', 'Project', 'Status']]
for e in experiments:
table.append([
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(e.time_added)),
e.key,
e.project,
e.status])
print(AsciiTable(table).table)
def _stop(args, cli_args):
with model.get_db_provider(cli_args.config) as db:
for e in args:
get_logger().info('Stopping experiment ' + e)
db.stop_experiment(e)
def _kill(args, cli_args):
with model.get_db_provider(cli_args.config) as db:
for e in args:
get_logger().info('Deleting experiment ' + e)
db.delete_experiment(e)
def get_logger():
global _my_logger
if not _my_logger:
_my_logger = logs.get_logger('studio-runs')
return _my_logger
if __name__ == '__main__':
main()
|
707d9ca6480180b4f68b15bd395f98ca54623c97
|
8819daac35f2739c7ecbd175d728dd9983fc025c
|
/tutorials/data-deduplication/recipes/basic.py
|
fde9c723d8dda02dbb9816dbc5181649be8adb80
|
[
"MIT"
] |
permissive
|
explosion/prodigy-recipes
|
7410cef04c8b99aa01216ea35cd613fea724e472
|
066101d38eb53f33069ff1ce197cd506f3b8462e
|
refs/heads/master
| 2023-09-01T09:59:29.171287
| 2023-08-28T11:33:39
| 2023-08-28T11:33:39
| 113,669,374
| 438
| 116
| null | 2023-09-06T21:29:40
| 2017-12-09T12:57:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
basic.py
|
import prodigy
from prodigy.components.loaders import JSONL
from jinja2 import Environment, select_autoescape, FileSystemLoader
@prodigy.recipe(
"duplicate",
dataset=("The dataset to save to", "positional", None, str),
file_path=("The jsonl file with matched items", "positional", None, str),
)
def check_duplicate(dataset, file_path):
"""Annotate yes/no duplicate."""
stream = JSONL(file_path) # load in the JSONL file
stream = add_options(stream) # add options to each task
return {
"dataset": dataset, # save annotations in this dataset
"view_id": "choice", # use the choice interface
"stream": stream
}
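# Example invocation (a sketch; assumes Prodigy's -F flag for loading custom
# recipe files and a matches.jsonl whose records contain item1/item2 fields):
#
#   prodigy duplicate dedup_demo ./matches.jsonl -F recipes/basic.py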
def add_options(stream):
env = Environment(
loader=FileSystemLoader('recipes'),
autoescape=select_autoescape(['html', 'xml'])
)
# Helper function to add options to every task in a stream
options = [
{"id": "duplicate", "text": "✅ duplicate"},
{"id": "unique", "text": "❌ unique"},
]
for task in stream:
task["options"] = options
task["html"] = env.get_template("basic.html").render(item1=task['item1'], item2=task['item2'])
yield task
|
989f3352b2dbcccf28ff1e4ce516cfa77cc5b85a
|
29f18e8ddde0379cef7fa00b1a50058be3cafa79
|
/numba/tests/test_linalg.py
|
1ef259e226834b95dbf14014a57ad434bad26326
|
[
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"BSD-2-Clause"
] |
permissive
|
numba/numba
|
9a8345ff5f7d57f0ffec40e39941ebf2684df0d1
|
46059957ad416e68476d1e5f32ccd59f7d5df2bb
|
refs/heads/main
| 2023-08-09T22:29:38.170300
| 2023-08-07T15:00:27
| 2023-08-07T15:00:27
| 3,659,275
| 8,247
| 1,151
|
BSD-2-Clause
| 2023-09-13T14:43:48
| 2012-03-08T11:12:43
|
Python
|
UTF-8
|
Python
| false
| false
| 96,098
|
py
|
test_linalg.py
|
import contextlib
import gc
from itertools import product, cycle
import sys
import warnings
from numbers import Number, Integral
import platform
import numpy as np
from numba import jit, njit, typeof
from numba.core import errors
from numba.tests.support import (TestCase, tag, needs_lapack, needs_blas,
_is_armv7l, EnableNRTStatsMixin)
from .matmul_usecase import matmul_usecase
import unittest
def dot2(a, b):
return np.dot(a, b)
def dot3(a, b, out):
return np.dot(a, b, out=out)
def vdot(a, b):
return np.vdot(a, b)
class TestProduct(EnableNRTStatsMixin, TestCase):
"""
Tests for dot products.
"""
dtypes = (np.float64, np.float32, np.complex128, np.complex64)
def setUp(self):
# Collect leftovers from previous test cases before checking for leaks
gc.collect()
super(TestProduct, self).setUp()
def sample_vector(self, n, dtype):
# Be careful to generate only exactly representable float values,
# to avoid rounding discrepancies between Numpy and Numba
base = np.arange(n)
if issubclass(dtype, np.complexfloating):
return (base * (1 - 0.5j) + 2j).astype(dtype)
else:
return (base * 0.5 - 1).astype(dtype)
def sample_matrix(self, m, n, dtype):
return self.sample_vector(m * n, dtype).reshape((m, n))
@contextlib.contextmanager
def check_contiguity_warning(self, pyfunc):
"""
Check performance warning(s) for non-contiguity.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', errors.NumbaPerformanceWarning)
yield
self.assertGreaterEqual(len(w), 1)
self.assertIs(w[0].category, errors.NumbaPerformanceWarning)
self.assertIn("faster on contiguous arrays", str(w[0].message))
self.assertEqual(w[0].filename, pyfunc.__code__.co_filename)
# This works because our functions are one-liners
self.assertEqual(w[0].lineno, pyfunc.__code__.co_firstlineno + 1)
def check_func(self, pyfunc, cfunc, args):
with self.assertNoNRTLeak():
expected = pyfunc(*args)
got = cfunc(*args)
self.assertPreciseEqual(got, expected, ignore_sign_on_zero=True)
del got, expected
def _aligned_copy(self, arr):
# This exists for armv7l because NumPy wants aligned arrays for the
# `out` arg of functions, but np.empty/np.copy doesn't seem to always
# produce them, in particular for complex dtypes
size = (arr.size + 1) * arr.itemsize + 1
datasize = arr.size * arr.itemsize
tmp = np.empty(size, dtype=np.uint8)
for i in range(arr.itemsize + 1):
new = tmp[i : i + datasize].view(dtype=arr.dtype)
if new.flags.aligned:
break
else:
raise Exception("Could not obtain aligned array")
if arr.flags.c_contiguous:
new = np.reshape(new, arr.shape, order='C')
else:
new = np.reshape(new, arr.shape, order='F')
new[:] = arr[:]
assert new.flags.aligned
return new
def check_func_out(self, pyfunc, cfunc, args, out):
copier = self._aligned_copy if _is_armv7l else np.copy
with self.assertNoNRTLeak():
expected = copier(out)
got = copier(out)
self.assertIs(pyfunc(*args, out=expected), expected)
self.assertIs(cfunc(*args, out=got), got)
self.assertPreciseEqual(got, expected, ignore_sign_on_zero=True)
del got, expected
def assert_mismatching_sizes(self, cfunc, args, is_out=False):
with self.assertRaises(ValueError) as raises:
cfunc(*args)
msg = ("incompatible output array size" if is_out else
"incompatible array sizes")
self.assertIn(msg, str(raises.exception))
def assert_mismatching_dtypes(self, cfunc, args, func_name="np.dot()"):
with self.assertRaises(errors.TypingError) as raises:
cfunc(*args)
self.assertIn("%s arguments must all have the same dtype"
% (func_name,),
str(raises.exception))
def check_dot_vv(self, pyfunc, func_name):
n = 3
cfunc = jit(nopython=True)(pyfunc)
for dtype in self.dtypes:
a = self.sample_vector(n, dtype)
b = self.sample_vector(n, dtype)
self.check_func(pyfunc, cfunc, (a, b))
# Non-contiguous
self.check_func(pyfunc, cfunc, (a[::-1], b[::-1]))
# Mismatching sizes
a = self.sample_vector(n - 1, np.float64)
b = self.sample_vector(n, np.float64)
self.assert_mismatching_sizes(cfunc, (a, b))
# Mismatching dtypes
a = self.sample_vector(n, np.float32)
b = self.sample_vector(n, np.float64)
self.assert_mismatching_dtypes(cfunc, (a, b), func_name=func_name)
@needs_blas
def test_dot_vv(self):
"""
Test vector * vector np.dot()
"""
self.check_dot_vv(dot2, "np.dot()")
@needs_blas
def test_vdot(self):
"""
Test np.vdot()
"""
self.check_dot_vv(vdot, "np.vdot()")
def check_dot_vm(self, pyfunc2, pyfunc3, func_name):
def samples(m, n):
for order in 'CF':
a = self.sample_matrix(m, n, np.float64).copy(order=order)
b = self.sample_vector(n, np.float64)
yield a, b
for dtype in self.dtypes:
a = self.sample_matrix(m, n, dtype)
b = self.sample_vector(n, dtype)
yield a, b
# Non-contiguous
yield a[::-1], b[::-1]
cfunc2 = jit(nopython=True)(pyfunc2)
if pyfunc3 is not None:
cfunc3 = jit(nopython=True)(pyfunc3)
for m, n in [(2, 3),
(3, 0),
(0, 3)
]:
for a, b in samples(m, n):
self.check_func(pyfunc2, cfunc2, (a, b))
self.check_func(pyfunc2, cfunc2, (b, a.T))
if pyfunc3 is not None:
for a, b in samples(m, n):
out = np.empty(m, dtype=a.dtype)
self.check_func_out(pyfunc3, cfunc3, (a, b), out)
self.check_func_out(pyfunc3, cfunc3, (b, a.T), out)
# Mismatching sizes
m, n = 2, 3
a = self.sample_matrix(m, n - 1, np.float64)
b = self.sample_vector(n, np.float64)
self.assert_mismatching_sizes(cfunc2, (a, b))
self.assert_mismatching_sizes(cfunc2, (b, a.T))
if pyfunc3 is not None:
out = np.empty(m, np.float64)
self.assert_mismatching_sizes(cfunc3, (a, b, out))
self.assert_mismatching_sizes(cfunc3, (b, a.T, out))
a = self.sample_matrix(m, m, np.float64)
b = self.sample_vector(m, np.float64)
out = np.empty(m - 1, np.float64)
self.assert_mismatching_sizes(cfunc3, (a, b, out), is_out=True)
self.assert_mismatching_sizes(cfunc3, (b, a.T, out), is_out=True)
# Mismatching dtypes
a = self.sample_matrix(m, n, np.float32)
b = self.sample_vector(n, np.float64)
self.assert_mismatching_dtypes(cfunc2, (a, b), func_name)
if pyfunc3 is not None:
a = self.sample_matrix(m, n, np.float64)
b = self.sample_vector(n, np.float64)
out = np.empty(m, np.float32)
self.assert_mismatching_dtypes(cfunc3, (a, b, out), func_name)
@needs_blas
def test_dot_vm(self):
"""
Test vector * matrix and matrix * vector np.dot()
"""
self.check_dot_vm(dot2, dot3, "np.dot()")
def check_dot_mm(self, pyfunc2, pyfunc3, func_name):
def samples(m, n, k):
for order_a, order_b in product('CF', 'CF'):
a = self.sample_matrix(m, k, np.float64).copy(order=order_a)
b = self.sample_matrix(k, n, np.float64).copy(order=order_b)
yield a, b
for dtype in self.dtypes:
a = self.sample_matrix(m, k, dtype)
b = self.sample_matrix(k, n, dtype)
yield a, b
# Non-contiguous
yield a[::-1], b[::-1]
cfunc2 = jit(nopython=True)(pyfunc2)
if pyfunc3 is not None:
cfunc3 = jit(nopython=True)(pyfunc3)
# Test generic matrix * matrix as well as "degenerate" cases
# where one of the outer dimensions is 1 (i.e. really represents
# a vector, which may select a different implementation),
# one of the matrices is empty, or both matrices are empty.
for m, n, k in [(2, 3, 4), # Generic matrix * matrix
(1, 3, 4), # 2d vector * matrix
(1, 1, 4), # 2d vector * 2d vector
(0, 3, 2), # Empty matrix * matrix, empty output
(3, 0, 2), # Matrix * empty matrix, empty output
(0, 0, 3), # Both arguments empty, empty output
(3, 2, 0), # Both arguments empty, nonempty output
]:
for a, b in samples(m, n, k):
self.check_func(pyfunc2, cfunc2, (a, b))
self.check_func(pyfunc2, cfunc2, (b.T, a.T))
if pyfunc3 is not None:
for a, b in samples(m, n, k):
out = np.empty((m, n), dtype=a.dtype)
self.check_func_out(pyfunc3, cfunc3, (a, b), out)
out = np.empty((n, m), dtype=a.dtype)
self.check_func_out(pyfunc3, cfunc3, (b.T, a.T), out)
# Mismatching sizes
m, n, k = 2, 3, 4
a = self.sample_matrix(m, k - 1, np.float64)
b = self.sample_matrix(k, n, np.float64)
self.assert_mismatching_sizes(cfunc2, (a, b))
if pyfunc3 is not None:
out = np.empty((m, n), np.float64)
self.assert_mismatching_sizes(cfunc3, (a, b, out))
a = self.sample_matrix(m, k, np.float64)
b = self.sample_matrix(k, n, np.float64)
out = np.empty((m, n - 1), np.float64)
self.assert_mismatching_sizes(cfunc3, (a, b, out), is_out=True)
# Mismatching dtypes
a = self.sample_matrix(m, k, np.float32)
b = self.sample_matrix(k, n, np.float64)
self.assert_mismatching_dtypes(cfunc2, (a, b), func_name)
if pyfunc3 is not None:
a = self.sample_matrix(m, k, np.float64)
b = self.sample_matrix(k, n, np.float64)
out = np.empty((m, n), np.float32)
self.assert_mismatching_dtypes(cfunc3, (a, b, out), func_name)
@needs_blas
def test_dot_mm(self):
"""
Test matrix * matrix np.dot()
"""
self.check_dot_mm(dot2, dot3, "np.dot()")
@needs_blas
def test_matmul_vv(self):
"""
Test vector @ vector
"""
self.check_dot_vv(matmul_usecase, "'@'")
@needs_blas
def test_matmul_vm(self):
"""
Test vector @ matrix and matrix @ vector
"""
self.check_dot_vm(matmul_usecase, None, "'@'")
@needs_blas
def test_matmul_mm(self):
"""
Test matrix @ matrix
"""
self.check_dot_mm(matmul_usecase, None, "'@'")
@needs_blas
def test_contiguity_warnings(self):
m, k, n = 2, 3, 4
dtype = np.float64
a = self.sample_matrix(m, k, dtype)[::-1]
b = self.sample_matrix(k, n, dtype)[::-1]
out = np.empty((m, n), dtype)
cfunc = jit(nopython=True)(dot2)
with self.check_contiguity_warning(cfunc.py_func):
cfunc(a, b)
cfunc = jit(nopython=True)(dot3)
with self.check_contiguity_warning(cfunc.py_func):
cfunc(a, b, out)
a = self.sample_vector(n, dtype)[::-1]
b = self.sample_vector(n, dtype)[::-1]
cfunc = jit(nopython=True)(vdot)
with self.check_contiguity_warning(cfunc.py_func):
cfunc(a, b)
# Implementation definitions for the purpose of jitting.
def invert_matrix(a):
return np.linalg.inv(a)
def cholesky_matrix(a):
return np.linalg.cholesky(a)
def eig_matrix(a):
return np.linalg.eig(a)
def eigvals_matrix(a):
return np.linalg.eigvals(a)
def eigh_matrix(a):
return np.linalg.eigh(a)
def eigvalsh_matrix(a):
return np.linalg.eigvalsh(a)
def svd_matrix(a, full_matrices=1):
return np.linalg.svd(a, full_matrices)
def qr_matrix(a):
return np.linalg.qr(a)
def lstsq_system(A, B, rcond=-1):
return np.linalg.lstsq(A, B, rcond)
def solve_system(A, B):
return np.linalg.solve(A, B)
def pinv_matrix(A, rcond=1e-15): # 1e-15 from numpy impl
return np.linalg.pinv(A)
def slogdet_matrix(a):
return np.linalg.slogdet(a)
def det_matrix(a):
return np.linalg.det(a)
def norm_matrix(a, ord=None):
return np.linalg.norm(a, ord)
def cond_matrix(a, p=None):
return np.linalg.cond(a, p)
def matrix_rank_matrix(a, tol=None):
return np.linalg.matrix_rank(a, tol)
def matrix_power_matrix(a, n):
return np.linalg.matrix_power(a, n)
def trace_matrix(a, offset=0):
return np.trace(a, offset)
def trace_matrix_no_offset(a):
return np.trace(a)
def outer_matrix(a, b, out=None):
return np.outer(a, b, out=out)
def kron_matrix(a, b):
return np.kron(a, b)
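# In the tests below each of these wrappers is typically compiled with, e.g.,
#   cfunc = jit(nopython=True)(invert_matrix)
# and the compiled result is compared against the pure-Python call.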
class TestLinalgBase(EnableNRTStatsMixin, TestCase):
"""
Provides setUp and common data/error modes for testing np.linalg functions.
"""
# supported dtypes
dtypes = (np.float64, np.float32, np.complex128, np.complex64)
def setUp(self):
# Collect leftovers from previous test cases before checking for leaks
gc.collect()
super(TestLinalgBase, self).setUp()
def sample_vector(self, n, dtype):
# Be careful to generate only exactly representable float values,
# to avoid rounding discrepancies between Numpy and Numba
base = np.arange(n)
if issubclass(dtype, np.complexfloating):
return (base * (1 - 0.5j) + 2j).astype(dtype)
else:
return (base * 0.5 + 1).astype(dtype)
def specific_sample_matrix(
self, size, dtype, order, rank=None, condition=None):
"""
Provides a sample matrix with an optionally specified rank or condition
number.
size: (rows, columns), the dimensions of the returned matrix.
dtype: the dtype for the returned matrix.
order: the memory layout for the returned matrix, 'F' or 'C'.
rank: the rank of the matrix, an integer value, defaults to full rank.
condition: the condition number of the matrix (defaults to 1.)
NOTE: Only one of rank or condition may be set.
"""
# default condition
d_cond = 1.
if len(size) != 2:
raise ValueError("size must be a length 2 tuple.")
if order not in ['F', 'C']:
raise ValueError("order must be one of 'F' or 'C'.")
if dtype not in [np.float32, np.float64, np.complex64, np.complex128]:
raise ValueError("dtype must be a numpy floating point type.")
if rank is not None and condition is not None:
raise ValueError("Only one of rank or condition can be specified.")
if condition is None:
condition = d_cond
if condition < 1:
raise ValueError("Condition number must be >=1.")
np.random.seed(0) # repeatable seed
m, n = size
if m < 0 or n < 0:
raise ValueError("Negative dimensions given for matrix shape.")
minmn = min(m, n)
if rank is None:
rv = minmn
else:
if rank <= 0:
raise ValueError("Rank must be greater than zero.")
if not isinstance(rank, Integral):
raise ValueError("Rank must an integer.")
rv = rank
if rank > minmn:
raise ValueError("Rank given greater than full rank.")
if m == 1 or n == 1:
# vector, must be rank 1 (enforced above)
# condition of vector is also 1
if condition != d_cond:
raise ValueError(
"Condition number was specified for a vector (always 1.).")
maxmn = max(m, n)
Q = self.sample_vector(maxmn, dtype).reshape(m, n)
else:
# Build a sample matrix via combining SVD like inputs.
# Create matrices of left and right singular vectors.
# This could use Modified Gram-Schmidt and perhaps be quicker,
# at present it uses QR decompositions to obtain orthonormal
# matrices.
tmp = self.sample_vector(m * m, dtype).reshape(m, m)
U, _ = np.linalg.qr(tmp)
# flip the second array, else for m==n the identity matrix appears
tmp = self.sample_vector(n * n, dtype)[::-1].reshape(n, n)
V, _ = np.linalg.qr(tmp)
# create singular values.
sv = np.linspace(d_cond, condition, rv)
S = np.zeros((m, n))
idx = np.nonzero(np.eye(m, n))
S[idx[0][:rv], idx[1][:rv]] = sv
Q = np.dot(np.dot(U, S), V.T) # construct
Q = np.array(Q, dtype=dtype, order=order) # sort out order/type
return Q
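    # Illustrative call (not executed): a 5x3, C-ordered float64 matrix with
    # condition number 10 would be requested as
    #   self.specific_sample_matrix((5, 3), np.float64, 'C', condition=10.)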
def assert_error(self, cfunc, args, msg, err=ValueError):
with self.assertRaises(err) as raises:
cfunc(*args)
self.assertIn(msg, str(raises.exception))
def assert_non_square(self, cfunc, args):
msg = "Last 2 dimensions of the array must be square."
self.assert_error(cfunc, args, msg, np.linalg.LinAlgError)
def assert_wrong_dtype(self, name, cfunc, args):
msg = "np.linalg.%s() only supported on float and complex arrays" % name
self.assert_error(cfunc, args, msg, errors.TypingError)
def assert_wrong_dimensions(self, name, cfunc, args, la_prefix=True):
prefix = "np.linalg" if la_prefix else "np"
msg = "%s.%s() only supported on 2-D arrays" % (prefix, name)
self.assert_error(cfunc, args, msg, errors.TypingError)
def assert_no_nan_or_inf(self, cfunc, args):
msg = "Array must not contain infs or NaNs."
self.assert_error(cfunc, args, msg, np.linalg.LinAlgError)
def assert_contig_sanity(self, got, expected_contig):
"""
This checks that in a computed result from numba (array, possibly tuple
of arrays) all the arrays are contiguous in memory and that they are
all at least one of "C_CONTIGUOUS" or "F_CONTIGUOUS". The computed
result of the contiguousness is then compared against a hardcoded
expected result.
got: is the computed results from numba
expected_contig: is "C" or "F" and is the expected type of
contiguousness across all input values
(and therefore tests).
"""
if isinstance(got, tuple):
# tuple present, check all results
for a in got:
self.assert_contig_sanity(a, expected_contig)
else:
if not isinstance(got, Number):
# else a single array is present
c_contig = got.flags.c_contiguous
f_contig = got.flags.f_contiguous
# check that the result (possible set of) is at least one of
# C or F contiguous.
msg = "Results are not at least one of all C or F contiguous."
self.assertTrue(c_contig | f_contig, msg)
msg = "Computed contiguousness does not match expected."
if expected_contig == "C":
self.assertTrue(c_contig, msg)
elif expected_contig == "F":
self.assertTrue(f_contig, msg)
else:
raise ValueError("Unknown contig")
def assert_raise_on_singular(self, cfunc, args):
msg = "Matrix is singular to machine precision."
self.assert_error(cfunc, args, msg, err=np.linalg.LinAlgError)
def assert_is_identity_matrix(self, got, rtol=None, atol=None):
"""
Checks if a matrix is equal to the identity matrix.
"""
# check it is square
self.assertEqual(got.shape[-1], got.shape[-2])
# create identity matrix
eye = np.eye(got.shape[-1], dtype=got.dtype)
resolution = 5 * np.finfo(got.dtype).resolution
if rtol is None:
rtol = 10 * resolution
if atol is None:
atol = 100 * resolution # zeros tend to be fuzzy
# check it matches
np.testing.assert_allclose(got, eye, rtol, atol)
def assert_invalid_norm_kind(self, cfunc, args):
"""
For use in norm() and cond() tests.
"""
msg = "Invalid norm order for matrices."
self.assert_error(cfunc, args, msg, ValueError)
def assert_raise_on_empty(self, cfunc, args):
msg = 'Arrays cannot be empty'
self.assert_error(cfunc, args, msg, np.linalg.LinAlgError)
class TestTestLinalgBase(TestCase):
"""
    The sample matrix code in TestLinalgBase.specific_sample_matrix()
    is a bit involved, so this class tests that it works as intended.
"""
def test_specific_sample_matrix(self):
# add a default test to the ctor, it never runs so doesn't matter
inst = TestLinalgBase('specific_sample_matrix')
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
# test loop
for size, dtype, order in product(sizes, inst.dtypes, 'FC'):
m, n = size
minmn = min(m, n)
# test default full rank
A = inst.specific_sample_matrix(size, dtype, order)
self.assertEqual(A.shape, size)
self.assertEqual(np.linalg.matrix_rank(A), minmn)
# test reduced rank if a reduction is possible
if minmn > 1:
rank = minmn - 1
A = inst.specific_sample_matrix(size, dtype, order, rank=rank)
self.assertEqual(A.shape, size)
self.assertEqual(np.linalg.matrix_rank(A), rank)
resolution = 5 * np.finfo(dtype).resolution
# test default condition
A = inst.specific_sample_matrix(size, dtype, order)
self.assertEqual(A.shape, size)
np.testing.assert_allclose(np.linalg.cond(A),
1.,
rtol=resolution,
atol=resolution)
# test specified condition if matrix is > 1D
if minmn > 1:
condition = 10.
A = inst.specific_sample_matrix(
size, dtype, order, condition=condition)
self.assertEqual(A.shape, size)
np.testing.assert_allclose(np.linalg.cond(A),
10.,
rtol=resolution,
atol=resolution)
# check errors are raised appropriately
def check_error(args, msg, err=ValueError):
with self.assertRaises(err) as raises:
inst.specific_sample_matrix(*args)
self.assertIn(msg, str(raises.exception))
# check the checker runs ok
with self.assertRaises(AssertionError) as raises:
msg = "blank"
check_error(((2, 3), np.float64, 'F'), msg, err=ValueError)
# check invalid inputs...
# bad size
msg = "size must be a length 2 tuple."
check_error(((1,), np.float64, 'F'), msg, err=ValueError)
# bad order
msg = "order must be one of 'F' or 'C'."
check_error(((2, 3), np.float64, 'z'), msg, err=ValueError)
# bad type
msg = "dtype must be a numpy floating point type."
check_error(((2, 3), np.int32, 'F'), msg, err=ValueError)
# specifying both rank and condition
msg = "Only one of rank or condition can be specified."
check_error(((2, 3), np.float64, 'F', 1, 1), msg, err=ValueError)
# specifying negative condition
msg = "Condition number must be >=1."
check_error(((2, 3), np.float64, 'F', None, -1), msg, err=ValueError)
# specifying negative matrix dimension
msg = "Negative dimensions given for matrix shape."
check_error(((2, -3), np.float64, 'F'), msg, err=ValueError)
# specifying negative rank
msg = "Rank must be greater than zero."
check_error(((2, 3), np.float64, 'F', -1), msg, err=ValueError)
# specifying a rank greater than maximum rank
msg = "Rank given greater than full rank."
check_error(((2, 3), np.float64, 'F', 4), msg, err=ValueError)
# specifying a condition number for a vector
msg = "Condition number was specified for a vector (always 1.)."
check_error(((1, 3), np.float64, 'F', None, 10), msg, err=ValueError)
        # specifying a non-integer rank
        msg = "Rank must be an integer."
check_error(((2, 3), np.float64, 'F', 1.5), msg, err=ValueError)
class TestLinalgInv(TestLinalgBase):
"""
Tests for np.linalg.inv.
"""
@needs_lapack
def test_linalg_inv(self):
"""
Test np.linalg.inv
"""
n = 10
cfunc = jit(nopython=True)(invert_matrix)
def check(a, **kwargs):
expected = invert_matrix(a)
got = cfunc(a)
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try strict
try:
np.testing.assert_array_almost_equal_nulp(got, expected,
nulp=10)
except AssertionError:
# fall back to reconstruction
use_reconstruction = True
if use_reconstruction:
rec = np.dot(got, a)
self.assert_is_identity_matrix(rec)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a)
for dtype, order in product(self.dtypes, 'CF'):
a = self.specific_sample_matrix((n, n), dtype, order)
check(a)
# 0 dimensioned matrix
check(np.empty((0, 0)))
# Non square matrix
self.assert_non_square(cfunc, (np.ones((2, 3)),))
# Wrong dtype
self.assert_wrong_dtype("inv", cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions("inv", cfunc, (np.ones(10),))
# Singular matrix
self.assert_raise_on_singular(cfunc, (np.zeros((2, 2)),))
@needs_lapack
def test_no_input_mutation(self):
X = np.array([[1., 3, 2, 7,],
[-5, 4, 2, 3,],
[9, -3, 1, 1,],
[2, -2, 2, 8,]], order='F')
X_orig = np.copy(X)
@jit(nopython=True)
def ainv(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return np.linalg.inv(X)
expected = ainv.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = ainv(X, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(expected, got)
class TestLinalgCholesky(TestLinalgBase):
"""
Tests for np.linalg.cholesky.
"""
def sample_matrix(self, m, dtype, order):
# pd. (positive definite) matrix has eigenvalues in Z+
np.random.seed(0) # repeatable seed
A = np.random.rand(m, m)
# orthonormal q needed to form up q^{-1}*D*q
# no "orth()" in numpy
q, _ = np.linalg.qr(A)
L = np.arange(1, m + 1) # some positive eigenvalues
Q = np.dot(np.dot(q.T, np.diag(L)), q) # construct
Q = np.array(Q, dtype=dtype, order=order) # sort out order/type
return Q
def assert_not_pd(self, cfunc, args):
msg = "Matrix is not positive definite."
self.assert_error(cfunc, args, msg, np.linalg.LinAlgError)
@needs_lapack
def test_linalg_cholesky(self):
"""
Test np.linalg.cholesky
"""
n = 10
cfunc = jit(nopython=True)(cholesky_matrix)
def check(a):
expected = cholesky_matrix(a)
got = cfunc(a)
use_reconstruction = False
# check that the computed results are contig and in the same way
self.assert_contig_sanity(got, "C")
# try strict
try:
np.testing.assert_array_almost_equal_nulp(got, expected,
nulp=10)
except AssertionError:
# fall back to reconstruction
use_reconstruction = True
# try via reconstruction
if use_reconstruction:
rec = np.dot(got, np.conj(got.T))
resolution = 5 * np.finfo(a.dtype).resolution
np.testing.assert_allclose(
a,
rec,
rtol=resolution,
atol=resolution
)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a)
for dtype, order in product(self.dtypes, 'FC'):
a = self.sample_matrix(n, dtype, order)
check(a)
# 0 dimensioned matrix
check(np.empty((0, 0)))
rn = "cholesky"
# Non square matrices
self.assert_non_square(cfunc, (np.ones((2, 3), dtype=np.float64),))
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(rn, cfunc,
(np.ones(10, dtype=np.float64),))
# not pd
self.assert_not_pd(cfunc,
(np.ones(4, dtype=np.float64).reshape(2, 2),))
class TestLinalgEigenSystems(TestLinalgBase):
"""
Tests for np.linalg.eig/eigvals.
"""
def sample_matrix(self, m, dtype, order):
# This is a tridiag with the same but skewed values on the diagonals
v = self.sample_vector(m, dtype)
Q = np.diag(v)
idx = np.nonzero(np.eye(Q.shape[0], Q.shape[1], 1))
Q[idx] = v[1:]
idx = np.nonzero(np.eye(Q.shape[0], Q.shape[1], -1))
Q[idx] = v[:-1]
Q = np.array(Q, dtype=dtype, order=order)
return Q
def assert_no_domain_change(self, name, cfunc, args):
msg = name + "() argument must not cause a domain change."
self.assert_error(cfunc, args, msg)
def _check_worker(self, cfunc, name, expected_res_len,
check_for_domain_change):
def check(*args):
expected = cfunc.py_func(*args)
got = cfunc(*args)
a = args[0]
# check that the returned tuple is same length
self.assertEqual(len(expected), len(got))
# and that dimension is correct
res_is_tuple = False
if isinstance(got, tuple):
res_is_tuple = True
self.assertEqual(len(got), expected_res_len)
else: # its an array
self.assertEqual(got.ndim, expected_res_len)
# and that the computed results are contig and in the same way
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try plain match of each array to np first
for k in range(len(expected)):
try:
np.testing.assert_array_almost_equal_nulp(
got[k], expected[k], nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
# If plain match fails then reconstruction is used.
# this checks that A*V ~== V*diag(W)
# i.e. eigensystem ties out
# this is required as numpy uses only double precision lapack
# routines and computation of eigenvectors is numerically
# sensitive, numba uses the type specific routines therefore
# sometimes comes out with a different (but entirely
# valid) answer (eigenvectors are not unique etc.).
# This is only applicable if eigenvectors are computed
# along with eigenvalues i.e. result is a tuple.
resolution = 5 * np.finfo(a.dtype).resolution
if use_reconstruction:
if res_is_tuple:
w, v = got
# modify 'a' if hermitian eigensystem functionality is
# being tested. 'L' for use lower part is default and
# the only thing used at present so we conjugate transpose
# the lower part into the upper for use in the
# reconstruction. By construction the sample matrix is
# tridiag so this is just a question of copying the lower
# diagonal into the upper and conjugating on the way.
if name[-1] == 'h':
idxl = np.nonzero(np.eye(a.shape[0], a.shape[1], -1))
idxu = np.nonzero(np.eye(a.shape[0], a.shape[1], 1))
cfunc(*args)
# upper idx must match lower for default uplo="L"
# if complex, conjugate
a[idxu] = np.conj(a[idxl])
# also, only the real part of the diagonals is
# considered in the calculation so the imag is zeroed
# out for the purposes of use in reconstruction.
a[np.diag_indices(a.shape[0])] = np.real(np.diag(a))
lhs = np.dot(a, v)
rhs = np.dot(v, np.diag(w))
np.testing.assert_allclose(
lhs.real,
rhs.real,
rtol=resolution,
atol=resolution
)
if np.iscomplexobj(v):
np.testing.assert_allclose(
lhs.imag,
rhs.imag,
rtol=resolution,
atol=resolution
)
else:
# This isn't technically reconstruction but is here to
# deal with that the order of the returned eigenvalues
# may differ in the case of routines just returning
# eigenvalues and there's no true reconstruction
# available with which to perform a check.
np.testing.assert_allclose(
np.sort(expected),
np.sort(got),
rtol=resolution,
atol=resolution
)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(*args)
return check
def checker_for_linalg_eig(
self, name, func, expected_res_len, check_for_domain_change=None):
"""
Test np.linalg.eig
"""
n = 10
cfunc = jit(nopython=True)(func)
check = self._check_worker(cfunc, name, expected_res_len,
check_for_domain_change)
# The main test loop
for dtype, order in product(self.dtypes, 'FC'):
a = self.sample_matrix(n, dtype, order)
check(a)
# Test both a real and complex type as the impls are different
for ty in [np.float32, np.complex64]:
# 0 dimensioned matrix
check(np.empty((0, 0), dtype=ty))
# Non square matrices
self.assert_non_square(cfunc, (np.ones((2, 3), dtype=ty),))
# Wrong dtype
self.assert_wrong_dtype(name, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(name, cfunc, (np.ones(10, dtype=ty),))
# no nans or infs
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=ty),))
if check_for_domain_change:
# By design numba does not support dynamic return types, numpy does
# and uses this in the case of returning eigenvalues/vectors of
# a real matrix. The return type of np.linalg.eig(), when
# operating on a matrix in real space depends on the values present
# in the matrix itself (recalling that eigenvalues are the roots of the
# characteristic polynomial of the system matrix, which will by
# construction depend on the values present in the system matrix).
# This test asserts that if a domain change is required on the return
# type, i.e. complex eigenvalues from a real input, an error is raised.
# For complex types, regardless of the value of the imaginary part of
# the returned eigenvalues, a complex type will be returned, this
# follows numpy and fits in with numba.
# First check that the computation is valid (i.e. in complex space)
A = np.array([[1, -2], [2, 1]])
check(A.astype(np.complex128))
# and that the imaginary part is nonzero
l, _ = func(A)
self.assertTrue(np.any(l.imag))
# Now check that the computation fails in real space
for ty in [np.float32, np.float64]:
self.assert_no_domain_change(name, cfunc, (A.astype(ty),))
@needs_lapack
def test_linalg_eig(self):
self.checker_for_linalg_eig("eig", eig_matrix, 2, True)
@needs_lapack
def test_linalg_eigvals(self):
self.checker_for_linalg_eig("eigvals", eigvals_matrix, 1, True)
@needs_lapack
def test_linalg_eigh(self):
self.checker_for_linalg_eig("eigh", eigh_matrix, 2, False)
@needs_lapack
def test_linalg_eigvalsh(self):
self.checker_for_linalg_eig("eigvalsh", eigvalsh_matrix, 1, False)
@needs_lapack
def test_no_input_mutation(self):
# checks inputs are not mutated
for c in (('eig', 2, True),
('eigvals', 1, True),
('eigh', 2, False),
('eigvalsh', 1, False)):
m, nout, domain_change = c
meth = getattr(np.linalg, m)
@jit(nopython=True)
def func(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return meth(X)
check = self._check_worker(func, m, nout, domain_change)
for dtype in (np.float64, np.complex128):
with self.subTest(meth=meth, dtype=dtype):
# trivial system, doesn't matter, just checking if it gets
# mutated
X = np.array([[10., 1, 0, 1],
[1, 9, 0, 0],
[0, 0, 8, 0],
[1, 0, 0, 7],
], order='F', dtype=dtype)
X_orig = np.copy(X)
expected = func.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
check(X, False)
class TestLinalgSvd(TestLinalgBase):
"""
Tests for np.linalg.svd.
"""
# This checks that A ~= U*S*V**H, i.e. SV decomposition ties out. This is
# required as NumPy uses only double precision LAPACK routines and
# computation of SVD is numerically sensitive. Numba uses type-specific
# routines and therefore sometimes comes out with a different answer to
# NumPy (orthonormal bases are not unique, etc.).
def check_reconstruction(self, a, got, expected):
u, sv, vt = got
# Check they are dimensionally correct
for k in range(len(expected)):
self.assertEqual(got[k].shape, expected[k].shape)
# Columns in u and rows in vt dictates the working size of s
s = np.zeros((u.shape[1], vt.shape[0]))
np.fill_diagonal(s, sv)
rec = np.dot(np.dot(u, s), vt)
resolution = np.finfo(a.dtype).resolution
np.testing.assert_allclose(
a,
rec,
rtol=10 * resolution,
atol=100 * resolution # zeros tend to be fuzzy
)
@needs_lapack
def test_linalg_svd(self):
"""
Test np.linalg.svd
"""
cfunc = jit(nopython=True)(svd_matrix)
def check(a, **kwargs):
expected = svd_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# check that the returned tuple is same length
self.assertEqual(len(expected), len(got))
# and that length is 3
self.assertEqual(len(got), 3)
# and that the computed results are contig and in the same way
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try plain match of each array to np first
for k in range(len(expected)):
try:
np.testing.assert_array_almost_equal_nulp(
got[k], expected[k], nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
if use_reconstruction:
self.check_reconstruction(a, got, expected)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
# test: column vector, tall, wide, square, row vector
# prime sizes
sizes = [(7, 1), (7, 5), (5, 7), (3, 3), (1, 7)]
# flip on reduced or full matrices
full_matrices = (True, False)
# test loop
for size, dtype, fmat, order in \
product(sizes, self.dtypes, full_matrices, 'FC'):
a = self.specific_sample_matrix(size, dtype, order)
check(a, full_matrices=fmat)
rn = "svd"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(rn, cfunc,
(np.ones(10, dtype=np.float64),))
# no nans or infs
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=np.float64),))
# empty
for sz in [(0, 1), (1, 0), (0, 0)]:
args = (np.empty(sz), True)
self.assert_raise_on_empty(cfunc, args)
@needs_lapack
def test_no_input_mutation(self):
X = np.array([[1., 3, 2, 7,],
[-5, 4, 2, 3,],
[9, -3, 1, 1,],
[2, -2, 2, 8,]], order='F')
X_orig = np.copy(X)
@jit(nopython=True)
def func(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return np.linalg.svd(X)
expected = func.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
try:
for e_a, g_a in zip(expected, got):
np.testing.assert_allclose(e_a, g_a)
except AssertionError:
self.check_reconstruction(X, got, expected)
class TestLinalgQr(TestLinalgBase):
"""
Tests for np.linalg.qr.
"""
@needs_lapack
def test_linalg_qr(self):
"""
Test np.linalg.qr
"""
cfunc = jit(nopython=True)(qr_matrix)
def check(a, **kwargs):
expected = qr_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# check that the returned tuple is same length
self.assertEqual(len(expected), len(got))
# and that length is 2
self.assertEqual(len(got), 2)
# and that the computed results are contig and in the same way
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try plain match of each array to np first
for k in range(len(expected)):
try:
np.testing.assert_array_almost_equal_nulp(
got[k], expected[k], nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
# if plain match fails then reconstruction is used.
# this checks that A ~= Q*R and that (Q^H)*Q = I
# i.e. QR decomposition ties out
# this is required as numpy uses only double precision lapack
# routines and computation of qr is numerically
# sensitive, numba using the type specific routines therefore
# sometimes comes out with a different answer (orthonormal bases
# are not unique etc.).
if use_reconstruction:
q, r = got
# check they are dimensionally correct
for k in range(len(expected)):
self.assertEqual(got[k].shape, expected[k].shape)
# check A=q*r
rec = np.dot(q, r)
resolution = np.finfo(a.dtype).resolution
np.testing.assert_allclose(
a,
rec,
rtol=10 * resolution,
atol=100 * resolution # zeros tend to be fuzzy
)
# check q is orthonormal
self.assert_is_identity_matrix(np.dot(np.conjugate(q.T), q))
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
# test: column vector, tall, wide, square, row vector
# prime sizes
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
# test loop
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
a = self.specific_sample_matrix(size, dtype, order)
check(a)
rn = "qr"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(rn, cfunc,
(np.ones(10, dtype=np.float64),))
# no nans or infs
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=np.float64),))
# empty
for sz in [(0, 1), (1, 0), (0, 0)]:
self.assert_raise_on_empty(cfunc, (np.empty(sz),))
@needs_lapack
def test_no_input_mutation(self):
X = np.array([[1., 3, 2, 7,],
[-5, 4, 2, 3,],
[9, -3, 1, 1,],
[2, -2, 2, 8,]], order='F')
X_orig = np.copy(X)
@jit(nopython=True)
def func(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return np.linalg.qr(X)
expected = func.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
for e_a, g_a in zip(expected, got):
np.testing.assert_allclose(e_a, g_a)
class TestLinalgSystems(TestLinalgBase):
"""
Base class for testing "system" solvers from np.linalg.
Namely np.linalg.solve() and np.linalg.lstsq().
"""
# check for RHS with dimension > 2 raises
def assert_wrong_dimensions_1D(self, name, cfunc, args, la_prefix=True):
prefix = "np.linalg" if la_prefix else "np"
msg = "%s.%s() only supported on 1 and 2-D arrays" % (prefix, name)
self.assert_error(cfunc, args, msg, errors.TypingError)
# check that a dimensionally invalid system raises
def assert_dimensionally_invalid(self, cfunc, args):
msg = "Incompatible array sizes, system is not dimensionally valid."
self.assert_error(cfunc, args, msg, np.linalg.LinAlgError)
# check that args with differing dtypes raise
def assert_homogeneous_dtypes(self, name, cfunc, args):
msg = "np.linalg.%s() only supports inputs that have homogeneous dtypes." % name
self.assert_error(cfunc, args, msg, errors.TypingError)
class TestLinalgLstsq(TestLinalgSystems):
"""
Tests for np.linalg.lstsq.
"""
# NOTE: The testing of this routine is hard as it has to handle numpy
# using double precision routines on single precision input, this has
# a knock on effect especially in rank deficient cases and cases where
# conditioning is generally poor. As a result computed ranks can differ
# and consequently the calculated residual can differ.
# The tests try and deal with this as best as they can through the use
# of reconstruction and measures like residual norms.
# Suggestions for improvements are welcomed!
@needs_lapack
def test_linalg_lstsq(self):
"""
Test np.linalg.lstsq
"""
cfunc = jit(nopython=True)(lstsq_system)
def check(A, B, **kwargs):
expected = lstsq_system(A, B, **kwargs)
got = cfunc(A, B, **kwargs)
# check that the returned tuple is same length
self.assertEqual(len(expected), len(got))
# and that length is 4
self.assertEqual(len(got), 4)
# and that the computed results are contig and in the same way
self.assert_contig_sanity(got, "C")
use_reconstruction = False
# check the ranks are the same and continue to a standard
# match if that is the case (if ranks differ, then output
# in e.g. residual array is of different size!).
try:
self.assertEqual(got[2], expected[2])
# try plain match of each array to np first
for k in range(len(expected)):
try:
np.testing.assert_array_almost_equal_nulp(
got[k], expected[k], nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
except AssertionError:
use_reconstruction = True
if use_reconstruction:
x, res, rank, s = got
                # indices in the output which are ndarrays
out_array_idx = [0, 1, 3]
try:
# check the ranks are the same
self.assertEqual(rank, expected[2])
# check they are dimensionally correct, skip [2] = rank.
for k in out_array_idx:
if isinstance(expected[k], np.ndarray):
self.assertEqual(got[k].shape, expected[k].shape)
except AssertionError:
# check the rank differs by 1. (numerical fuzz)
self.assertTrue(abs(rank - expected[2]) < 2)
# check if A*X = B
resolution = np.finfo(A.dtype).resolution
try:
# this will work so long as the conditioning is
# ok and the rank is full
rec = np.dot(A, x)
np.testing.assert_allclose(
B,
rec,
rtol=10 * resolution,
atol=10 * resolution
)
except AssertionError:
# system is probably under/over determined and/or
# poorly conditioned. Check slackened equality
# and that the residual norm is the same.
for k in out_array_idx:
try:
np.testing.assert_allclose(
expected[k],
got[k],
rtol=100 * resolution,
atol=100 * resolution
)
except AssertionError:
# check the fail is likely due to bad conditioning
c = np.linalg.cond(A)
self.assertGreater(10 * c, (1. / resolution))
# make sure the residual 2-norm is ok
                    # if this fails it's probably due to numpy using double
# precision LAPACK routines for singles.
res_expected = np.linalg.norm(
B - np.dot(A, expected[0]))
res_got = np.linalg.norm(B - np.dot(A, x))
# rtol = 10. as all the systems are products of orthonormals
# and on the small side (rows, cols) < 100.
np.testing.assert_allclose(
res_expected, res_got, rtol=10.)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(A, B, **kwargs)
# test: column vector, tall, wide, square, row vector
# prime sizes, the A's
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
# compatible B's for Ax=B must have same number of rows and 1 or more
# columns
# This test takes ages! So combinations are trimmed via cycling
# gets a dtype
cycle_dt = cycle(self.dtypes)
orders = ['F', 'C']
# gets a memory order flag
cycle_order = cycle(orders)
# a specific condition number to use in the following tests
# there is nothing special about it other than it is not magic
specific_cond = 10.
# inner test loop, extracted as there's additional logic etc required
# that'd end up with this being repeated a lot
def inner_test_loop_fn(A, dt, **kwargs):
# test solve Ax=B for (column, matrix) B, same dtype as A
b_sizes = (1, 13)
for b_size in b_sizes:
# check 2D B
b_order = next(cycle_order)
B = self.specific_sample_matrix(
(A.shape[0], b_size), dt, b_order)
check(A, B, **kwargs)
# check 1D B
b_order = next(cycle_order)
tmp = B[:, 0].copy(order=b_order)
check(A, tmp, **kwargs)
# test loop
for a_size in sizes:
dt = next(cycle_dt)
a_order = next(cycle_order)
# A full rank, well conditioned system
A = self.specific_sample_matrix(a_size, dt, a_order)
# run the test loop
inner_test_loop_fn(A, dt)
m, n = a_size
minmn = min(m, n)
# operations that only make sense with a 2D matrix system
if m != 1 and n != 1:
# Test a rank deficient system
r = minmn - 1
A = self.specific_sample_matrix(
a_size, dt, a_order, rank=r)
# run the test loop
inner_test_loop_fn(A, dt)
# Test a system with a given condition number for use in
# testing the rcond parameter.
# This works because the singular values in the
# specific_sample_matrix code are linspace (1, cond, [0... if
# rank deficient])
A = self.specific_sample_matrix(
a_size, dt, a_order, condition=specific_cond)
# run the test loop
rcond = 1. / specific_cond
approx_half_rank_rcond = minmn * rcond
inner_test_loop_fn(A, dt,
rcond=approx_half_rank_rcond)
# check empty arrays
empties = [
[(0, 1), (1,)], # empty A, valid b
[(1, 0), (1,)], # empty A, valid b
[(1, 1), (0,)], # valid A, empty 1D b
[(1, 1), (1, 0)], # valid A, empty 2D b
]
for A, b in empties:
args = (np.empty(A), np.empty(b))
self.assert_raise_on_empty(cfunc, args)
# Test input validation
ok = np.array([[1., 2.], [3., 4.]], dtype=np.float64)
# check ok input is ok
        cfunc(ok, ok)
# check bad inputs
rn = "lstsq"
# Wrong dtype
bad = np.array([[1, 2], [3, 4]], dtype=np.int32)
self.assert_wrong_dtype(rn, cfunc, (ok, bad))
self.assert_wrong_dtype(rn, cfunc, (bad, ok))
# different dtypes
bad = np.array([[1, 2], [3, 4]], dtype=np.float32)
self.assert_homogeneous_dtypes(rn, cfunc, (ok, bad))
self.assert_homogeneous_dtypes(rn, cfunc, (bad, ok))
# Dimension issue
bad = np.array([1, 2], dtype=np.float64)
self.assert_wrong_dimensions(rn, cfunc, (bad, ok))
# no nans or infs
bad = np.array([[1., 2., ], [np.inf, np.nan]], dtype=np.float64)
self.assert_no_nan_or_inf(cfunc, (ok, bad))
self.assert_no_nan_or_inf(cfunc, (bad, ok))
# check 1D is accepted for B (2D is done previously)
# and then that anything of higher dimension raises
oneD = np.array([1., 2.], dtype=np.float64)
        cfunc(ok, oneD)
bad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float64)
self.assert_wrong_dimensions_1D(rn, cfunc, (ok, bad))
# check a dimensionally invalid system raises (1D and 2D cases
# checked)
bad1D = np.array([1.], dtype=np.float64)
bad2D = np.array([[1.], [2.], [3.]], dtype=np.float64)
self.assert_dimensionally_invalid(cfunc, (ok, bad1D))
self.assert_dimensionally_invalid(cfunc, (ok, bad2D))
@needs_lapack
def test_issue3368(self):
X = np.array([[1., 7.54, 6.52],
[1., 2.70, 4.00],
[1., 2.50, 3.80],
[1., 1.15, 5.64],
[1., 4.22, 3.27],
[1., 1.41, 5.70],], order='F')
X_orig = np.copy(X)
y = np.array([1., 2., 3., 4., 5., 6.])
@jit(nopython=True)
def f2(X, y, test):
if test:
# never executed, but necessary to trigger the bug
X = X[1:2, :]
return np.linalg.lstsq(X, y)
f2(X, y, False)
np.testing.assert_allclose(X, X_orig)
class TestLinalgSolve(TestLinalgSystems):
"""
Tests for np.linalg.solve.
"""
@needs_lapack
def test_linalg_solve(self):
"""
Test np.linalg.solve
"""
cfunc = jit(nopython=True)(solve_system)
def check(a, b, **kwargs):
expected = solve_system(a, b, **kwargs)
got = cfunc(a, b, **kwargs)
# check that the computed results are contig and in the same way
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try plain match of the result first
try:
np.testing.assert_array_almost_equal_nulp(
got, expected, nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
# If plain match fails then reconstruction is used,
# this checks that AX ~= B.
# Plain match can fail due to numerical fuzziness associated
# with system size and conditioning, or more simply from
# numpy using double precision routines for computation that
# could be done in single precision (which is what numba does).
# Therefore minor differences in results can appear due to
# e.g. numerical roundoff being different between two precisions.
if use_reconstruction:
# check they are dimensionally correct
self.assertEqual(got.shape, expected.shape)
# check AX=B
rec = np.dot(a, got)
resolution = np.finfo(a.dtype).resolution
np.testing.assert_allclose(
b,
rec,
rtol=10 * resolution,
atol=100 * resolution # zeros tend to be fuzzy
)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, b, **kwargs)
# test: prime size squares
sizes = [(1, 1), (3, 3), (7, 7)]
# test loop
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
A = self.specific_sample_matrix(size, dtype, order)
b_sizes = (1, 13)
for b_size, b_order in product(b_sizes, 'FC'):
# check 2D B
B = self.specific_sample_matrix(
(A.shape[0], b_size), dtype, b_order)
check(A, B)
# check 1D B
tmp = B[:, 0].copy(order=b_order)
check(A, tmp)
# check empty
cfunc(np.empty((0, 0)), np.empty((0,)))
# Test input validation
ok = np.array([[1., 0.], [0., 1.]], dtype=np.float64)
# check ok input is ok
cfunc(ok, ok)
# check bad inputs
rn = "solve"
# Wrong dtype
bad = np.array([[1, 0], [0, 1]], dtype=np.int32)
self.assert_wrong_dtype(rn, cfunc, (ok, bad))
self.assert_wrong_dtype(rn, cfunc, (bad, ok))
# different dtypes
bad = np.array([[1, 2], [3, 4]], dtype=np.float32)
self.assert_homogeneous_dtypes(rn, cfunc, (ok, bad))
self.assert_homogeneous_dtypes(rn, cfunc, (bad, ok))
# Dimension issue
bad = np.array([1, 0], dtype=np.float64)
self.assert_wrong_dimensions(rn, cfunc, (bad, ok))
# no nans or infs
bad = np.array([[1., 0., ], [np.inf, np.nan]], dtype=np.float64)
self.assert_no_nan_or_inf(cfunc, (ok, bad))
self.assert_no_nan_or_inf(cfunc, (bad, ok))
# check 1D is accepted for B (2D is done previously)
# and then that anything of higher dimension raises
ok_oneD = np.array([1., 2.], dtype=np.float64)
cfunc(ok, ok_oneD)
bad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float64)
self.assert_wrong_dimensions_1D(rn, cfunc, (ok, bad))
# check an invalid system raises (1D and 2D cases checked)
bad1D = np.array([1.], dtype=np.float64)
bad2D = np.array([[1.], [2.], [3.]], dtype=np.float64)
self.assert_dimensionally_invalid(cfunc, (ok, bad1D))
self.assert_dimensionally_invalid(cfunc, (ok, bad2D))
# check that a singular system raises
bad2D = self.specific_sample_matrix((2, 2), np.float64, 'C', rank=1)
self.assert_raise_on_singular(cfunc, (bad2D, ok))
@needs_lapack
def test_no_input_mutation(self):
X = np.array([[1., 1, 1, 1],
[0., 1, 1, 1],
[0., 0, 1, 1],
[1., 0, 0, 1],], order='F')
X_orig = np.copy(X)
y = np.array([1., 2., 3., 4])
y_orig = np.copy(y)
@jit(nopython=True)
def func(X, y, test):
if test:
# not executed, triggers A order in X
X = X[1:2, :]
return np.linalg.solve(X, y)
expected = func.py_func(X, y, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(y, y_orig)
got = func(X, y, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(y, y_orig)
np.testing.assert_allclose(expected, got)
class TestLinalgPinv(TestLinalgBase):
"""
Tests for np.linalg.pinv.
"""
@needs_lapack
def test_linalg_pinv(self):
"""
Test np.linalg.pinv
"""
cfunc = jit(nopython=True)(pinv_matrix)
def check(a, **kwargs):
expected = pinv_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# check that the computed results are contig and in the same way
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try plain match of each array to np first
try:
np.testing.assert_array_almost_equal_nulp(
got, expected, nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
# If plain match fails then reconstruction is used.
# This can occur due to numpy using double precision
# LAPACK when single can be used, this creates round off
# problems. Also, if the matrix has machine precision level
# zeros in its singular values then the singular vectors are
# likely to vary depending on round off.
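            # For reference: the Moore-Penrose pseudo-inverse P of A is the
            # unique matrix with A@P@A == A, P@A@P == P, and A@P, P@A both
            # Hermitian. The fallbacks below (P@A ~= I when A has full column
            # rank, and pinv(pinv(A)) ~= A otherwise) are cheaper consequences
            # of those identities, not the full definition.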
if use_reconstruction:
# check they are dimensionally correct
self.assertEqual(got.shape, expected.shape)
# check pinv(A)*A~=eye
# if the problem is numerical fuzz then this will probably
# work, if the problem is rank deficiency then it won't!
rec = np.dot(got, a)
try:
self.assert_is_identity_matrix(rec)
except AssertionError:
# check A=pinv(pinv(A))
resolution = 5 * np.finfo(a.dtype).resolution
rec = cfunc(got)
np.testing.assert_allclose(
rec,
a,
rtol=10 * resolution,
atol=100 * resolution # zeros tend to be fuzzy
)
if a.shape[0] >= a.shape[1]:
# if it is overdetermined or fully determined
# use numba lstsq function (which is type specific) to
# compute the inverse and check against that.
lstsq = jit(nopython=True)(lstsq_system)
lstsq_pinv = lstsq(
a, np.eye(
a.shape[0]).astype(
a.dtype), **kwargs)[0]
np.testing.assert_allclose(
got,
lstsq_pinv,
rtol=10 * resolution,
atol=100 * resolution # zeros tend to be fuzzy
)
# check the 2 norm of the difference is small
self.assertLess(np.linalg.norm(got - expected), resolution)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
# test: column vector, tall, wide, square, row vector
# prime sizes
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
# When required, a specified condition number
specific_cond = 10.
# test loop
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
# check a full rank matrix
a = self.specific_sample_matrix(size, dtype, order)
check(a)
m, n = size
if m != 1 and n != 1:
# check a rank deficient matrix
minmn = min(m, n)
a = self.specific_sample_matrix(size, dtype, order,
condition=specific_cond)
rcond = 1. / specific_cond
approx_half_rank_rcond = minmn * rcond
check(a, rcond=approx_half_rank_rcond)
# check empty
for sz in [(0, 1), (1, 0)]:
check(np.empty(sz))
rn = "pinv"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(rn, cfunc,
(np.ones(10, dtype=np.float64),))
# no nans or infs
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=np.float64),))
@needs_lapack
def test_issue5870(self):
# testing for mutation of input matrix
@jit(nopython=True)
def some_fn(v):
return np.linalg.pinv(v[0])
v_data = np.array([[1., 3, 2, 7,],
[-5, 4, 2, 3,],
[9, -3, 1, 1,],
[2, -2, 2, 8,]], order='F')
v_orig = np.copy(v_data)
reshaped_v = v_data.reshape((1, 4, 4))
expected = some_fn.py_func(reshaped_v)
np.testing.assert_allclose(v_data, v_orig)
got = some_fn(reshaped_v)
np.testing.assert_allclose(v_data, v_orig)
np.testing.assert_allclose(expected, got)
class TestLinalgDetAndSlogdet(TestLinalgBase):
"""
Tests for np.linalg.det. and np.linalg.slogdet.
Exactly the same inputs are used for both tests as
det() is a trivial function of slogdet(), the tests
are therefore combined.
"""
def check_det(self, cfunc, a, **kwargs):
expected = det_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
resolution = 5 * np.finfo(a.dtype).resolution
# check the determinants are the same
np.testing.assert_allclose(got, expected, rtol=resolution)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
def check_slogdet(self, cfunc, a, **kwargs):
expected = slogdet_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# As numba returns python floats types and numpy returns
# numpy float types, some more adjustment and different
# types of comparison than those used with array based
# results is required.
# check that the returned tuple is same length
self.assertEqual(len(expected), len(got))
# and that length is 2
self.assertEqual(len(got), 2)
# check that the domain of the results match
for k in range(2):
self.assertEqual(
np.iscomplexobj(got[k]),
np.iscomplexobj(expected[k]))
# turn got[0] into the same dtype as `a`
# this is so checking with nulp will work
got_conv = a.dtype.type(got[0])
np.testing.assert_array_almost_equal_nulp(
got_conv, expected[0], nulp=10)
# compare log determinant magnitude with a more fuzzy value
# as numpy values come from higher precision lapack routines
resolution = 5 * np.finfo(a.dtype).resolution
np.testing.assert_allclose(
got[1], expected[1], rtol=resolution, atol=resolution)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
def do_test(self, rn, check, cfunc):
        # test: 1x1 as it is unusual, 4x4 as it is even and 7x7 as it is odd!
sizes = [(1, 1), (4, 4), (7, 7)]
# test loop
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
# check a full rank matrix
a = self.specific_sample_matrix(size, dtype, order)
check(cfunc, a)
# use a matrix of zeros to trip xgetrf U(i,i)=0 singular test
for dtype, order in product(self.dtypes, 'FC'):
a = np.zeros((3, 3), dtype=dtype)
check(cfunc, a)
# check empty
check(cfunc, np.empty((0, 0)))
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(rn, cfunc,
(np.ones(10, dtype=np.float64),))
# no nans or infs
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=np.float64),))
@needs_lapack
def test_linalg_det(self):
cfunc = jit(nopython=True)(det_matrix)
self.do_test("det", self.check_det, cfunc)
@needs_lapack
def test_linalg_slogdet(self):
cfunc = jit(nopython=True)(slogdet_matrix)
self.do_test("slogdet", self.check_slogdet, cfunc)
@needs_lapack
def test_no_input_mutation(self):
X = np.array([[1., 3, 2, 7,],
[-5, 4, 2, 3,],
[9, -3, 1, 1,],
[2, -2, 2, 8,]], order='F')
X_orig = np.copy(X)
@jit(nopython=True)
def func(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return np.linalg.slogdet(X)
expected = func.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(expected, got)
# Use TestLinalgSystems as a base to get access to additional
# testing for 1 and 2D inputs.
class TestLinalgNorm(TestLinalgSystems):
"""
Tests for np.linalg.norm.
"""
@needs_lapack
def test_linalg_norm(self):
"""
Test np.linalg.norm
"""
cfunc = jit(nopython=True)(norm_matrix)
def check(a, **kwargs):
expected = norm_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# All results should be in the real domain
self.assertTrue(not np.iscomplexobj(got))
resolution = 5 * np.finfo(a.dtype).resolution
# check the norms are the same to the arg `a` precision
np.testing.assert_allclose(got, expected, rtol=resolution)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
# Check 1D inputs
sizes = [1, 4, 7]
nrm_types = [None, np.inf, -np.inf, 0, 1, -1, 2, -2, 5, 6.7, -4.3]
# standard 1D input
for size, dtype, nrm_type in \
product(sizes, self.dtypes, nrm_types):
a = self.sample_vector(size, dtype)
check(a, ord=nrm_type)
# sliced 1D input
for dtype, nrm_type in \
product(self.dtypes, nrm_types):
a = self.sample_vector(10, dtype)[::3]
check(a, ord=nrm_type)
# Check 2D inputs:
# test: column vector, tall, wide, square, row vector
# prime sizes
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
nrm_types = [None, np.inf, -np.inf, 1, -1, 2, -2]
# standard 2D input
for size, dtype, order, nrm_type in \
product(sizes, self.dtypes, 'FC', nrm_types):
# check a full rank matrix
a = self.specific_sample_matrix(size, dtype, order)
check(a, ord=nrm_type)
# check 2D slices work for the case where xnrm2 is called from
# BLAS (ord=None) to make sure it is working ok.
nrm_types = [None]
for dtype, nrm_type, order in \
product(self.dtypes, nrm_types, 'FC'):
a = self.specific_sample_matrix((17, 13), dtype, order)
# contig for C order
check(a[:3], ord=nrm_type)
# contig for Fortran order
check(a[:, 3:], ord=nrm_type)
# contig for neither order
check(a[1, 4::3], ord=nrm_type)
# check that numba returns zero for empty arrays. Numpy returns zero
# for most norm types and raises ValueError for +/-np.inf.
# there is not a great deal of consistency in Numpy's response so
# it is not being emulated in Numba
for dtype, nrm_type, order in \
product(self.dtypes, nrm_types, 'FC'):
a = np.empty((0,), dtype=dtype, order=order)
self.assertEqual(cfunc(a, nrm_type), 0.0)
a = np.empty((0, 0), dtype=dtype, order=order)
self.assertEqual(cfunc(a, nrm_type), 0.0)
rn = "norm"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue, reuse the test from the TestLinalgSystems class
self.assert_wrong_dimensions_1D(
rn, cfunc, (np.ones(
12, dtype=np.float64).reshape(
2, 2, 3),))
# no nans or infs for 2d case when SVD is used (e.g 2-norm)
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2.], [np.inf, np.nan]],
dtype=np.float64), 2))
# assert 2D input raises for an invalid norm kind kwarg
self.assert_invalid_norm_kind(cfunc, (np.array([[1., 2.], [3., 4.]],
dtype=np.float64), 6))
class TestLinalgCond(TestLinalgBase):
"""
Tests for np.linalg.cond.
"""
@needs_lapack
def test_linalg_cond(self):
"""
Test np.linalg.cond
"""
cfunc = jit(nopython=True)(cond_matrix)
def check(a, **kwargs):
expected = cond_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# All results should be in the real domain
self.assertTrue(not np.iscomplexobj(got))
resolution = 5 * np.finfo(a.dtype).resolution
# check the cond is the same to the arg `a` precision
np.testing.assert_allclose(got, expected, rtol=resolution)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
# valid p values (used to indicate norm type)
ps = [None, np.inf, -np.inf, 1, -1, 2, -2]
sizes = [(3, 3), (7, 7)]
for size, dtype, order, p in \
product(sizes, self.dtypes, 'FC', ps):
a = self.specific_sample_matrix(size, dtype, order)
check(a, p=p)
# When p=None non-square matrices are accepted.
sizes = [(7, 1), (11, 5), (5, 11), (1, 7)]
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
a = self.specific_sample_matrix(size, dtype, order)
check(a)
# empty
for sz in [(0, 1), (1, 0), (0, 0)]:
self.assert_raise_on_empty(cfunc, (np.empty(sz),))
# singular systems to trip divide-by-zero
x = np.array([[1, 0], [0, 0]], dtype=np.float64)
check(x)
check(x, p=2)
x = np.array([[0, 0], [0, 0]], dtype=np.float64)
check(x, p=-2)
# try an ill-conditioned system with 2-norm, make sure np raises an
# overflow warning as the result is `+inf` and that the result from
# numba matches.
with warnings.catch_warnings():
a = np.array([[1.e308, 0], [0, 0.1]], dtype=np.float64)
warnings.simplefilter("ignore", RuntimeWarning)
check(a)
rn = "cond"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(rn, cfunc,
(np.ones(10, dtype=np.float64),))
# no nans or infs when p="None" (default for kwarg).
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=np.float64),))
# assert raises for an invalid norm kind kwarg
self.assert_invalid_norm_kind(cfunc, (np.array([[1., 2.], [3., 4.]],
dtype=np.float64), 6))
class TestLinalgMatrixRank(TestLinalgSystems):
"""
Tests for np.linalg.matrix_rank.
"""
@needs_lapack
def test_linalg_matrix_rank(self):
"""
Test np.linalg.matrix_rank
"""
cfunc = jit(nopython=True)(matrix_rank_matrix)
def check(a, **kwargs):
expected = matrix_rank_matrix(a, **kwargs)
got = cfunc(a, **kwargs)
# Ranks are integral so comparison should be trivial.
# check the rank is the same
np.testing.assert_allclose(got, expected)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
# check full rank system
a = self.specific_sample_matrix(size, dtype, order)
check(a)
# If the system is a matrix, check rank deficiency is reported
# correctly. Check all ranks from 0 to (full rank - 1).
tol = 1e-13
# first check 1 to (full rank - 1)
for k in range(1, min(size) - 1):
# check rank k
a = self.specific_sample_matrix(size, dtype, order, rank=k)
self.assertEqual(cfunc(a), k)
check(a)
# check provision of a tolerance works as expected
# create a (m x n) diagonal matrix with a singular value
# guaranteed below the tolerance 1e-13
m, n = a.shape
a[:, :] = 0. # reuse `a`'s memory
idx = np.nonzero(np.eye(m, n))
if np.iscomplexobj(a):
b = 1. + np.random.rand(k) + 1.j +\
1.j * np.random.rand(k)
# min singular value is sqrt(2)*1e-14
b[0] = 1e-14 + 1e-14j
else:
b = 1. + np.random.rand(k)
b[0] = 1e-14 # min singular value is 1e-14
a[idx[0][:k], idx[1][:k]] = b.astype(dtype)
# rank should be k-1 (as tol is present)
self.assertEqual(cfunc(a, tol), k - 1)
check(a, tol=tol)
# then check zero rank
a[:, :] = 0.
self.assertEqual(cfunc(a), 0)
check(a)
# add in a singular value that is small
if np.iscomplexobj(a):
a[-1, -1] = 1e-14 + 1e-14j
else:
a[-1, -1] = 1e-14
# check the system has zero rank to a given tolerance
self.assertEqual(cfunc(a, tol), 0)
check(a, tol=tol)
# check the zero vector returns rank 0 and a nonzero vector
# returns rank 1.
for dt in self.dtypes:
a = np.zeros((5), dtype=dt)
self.assertEqual(cfunc(a), 0)
check(a)
# make it a nonzero vector
a[0] = 1.
self.assertEqual(cfunc(a), 1)
check(a)
# empty
for sz in [(0, 1), (1, 0), (0, 0)]:
for tol in [None, 1e-13]:
self.assert_raise_on_empty(cfunc, (np.empty(sz), tol))
rn = "matrix_rank"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions_1D(
rn, cfunc, (np.ones(
12, dtype=np.float64).reshape(
2, 2, 3),))
# no nans or infs for 2D case
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=np.float64),))
@needs_lapack
def test_no_input_mutation(self):
# this is here to test no input mutation by
# numba.np.linalg._compute_singular_values
# which is the workhorse for norm with 2d input, rank and cond.
X = np.array([[1., 3, 2, 7,],
[-5, 4, 2, 3,],
[9, -3, 1, 1,],
[2, -2, 2, 8,]], order='F')
X_orig = np.copy(X)
@jit(nopython=True)
def func(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return np.linalg.matrix_rank(X)
expected = func.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(expected, got)
class TestLinalgMatrixPower(TestLinalgBase):
"""
Tests for np.linalg.matrix_power.
"""
    def assert_int_exponent(self, cfunc, args):
# validate first arg is ok
cfunc(args[0], 1)
# pass in both args and assert fail
with self.assertRaises(errors.TypingError):
cfunc(*args)
@needs_lapack
def test_linalg_matrix_power(self):
cfunc = jit(nopython=True)(matrix_power_matrix)
def check(a, pwr):
expected = matrix_power_matrix(a, pwr)
got = cfunc(a, pwr)
# check that the computed results are contig and in the same way
self.assert_contig_sanity(got, "C")
res = 5 * np.finfo(a.dtype).resolution
np.testing.assert_allclose(got, expected, rtol=res, atol=res)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, pwr)
sizes = [(1, 1), (5, 5), (7, 7)]
powers = [-33, -17] + list(range(-10, 10)) + [17, 33]
for size, pwr, dtype, order in \
product(sizes, powers, self.dtypes, 'FC'):
a = self.specific_sample_matrix(size, dtype, order)
check(a, pwr)
a = np.empty((0, 0), dtype=dtype, order=order)
check(a, pwr)
rn = "matrix_power"
# Wrong dtype
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32), 1))
# not an int power
self.assert_wrong_dtype(rn, cfunc,
(np.ones((2, 2), dtype=np.int32), 1))
# non square system
args = (np.ones((3, 5)), 1)
msg = 'input must be a square array'
self.assert_error(cfunc, args, msg)
# Dimension issue
self.assert_wrong_dimensions(rn, cfunc,
(np.ones(10, dtype=np.float64), 1))
# non-integer supplied as exponent
        self.assert_int_exponent(cfunc, (np.ones((2, 2)), 1.2))
# singular matrix is not invertible
self.assert_raise_on_singular(cfunc, (np.array([[0., 0], [1, 1]]), -1))
class TestTrace(TestLinalgBase):
"""
Tests for np.trace.
"""
def setUp(self):
super(TestTrace, self).setUp()
# compile two versions, one with and one without the offset kwarg
self.cfunc_w_offset = jit(nopython=True)(trace_matrix)
self.cfunc_no_offset = jit(nopython=True)(trace_matrix_no_offset)
def assert_int_offset(self, cfunc, a, **kwargs):
# validate first arg is ok
cfunc(a)
# pass in kwarg and assert fail
with self.assertRaises(errors.TypingError):
cfunc(a, **kwargs)
def test_trace(self):
def check(a, **kwargs):
if 'offset' in kwargs:
expected = trace_matrix(a, **kwargs)
cfunc = self.cfunc_w_offset
else:
expected = trace_matrix_no_offset(a, **kwargs)
cfunc = self.cfunc_no_offset
got = cfunc(a, **kwargs)
res = 5 * np.finfo(a.dtype).resolution
np.testing.assert_allclose(got, expected, rtol=res, atol=res)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
# test: column vector, tall, wide, square, row vector
# prime sizes
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
# offsets to cover the range of the matrix sizes above
offsets = [-13, -12, -11] + list(range(-10, 10)) + [11, 12, 13]
for size, offset, dtype, order in \
product(sizes, offsets, self.dtypes, 'FC'):
a = self.specific_sample_matrix(size, dtype, order)
check(a, offset=offset)
if offset == 0:
check(a)
a = np.empty((0, 0), dtype=dtype, order=order)
check(a, offset=offset)
if offset == 0:
check(a)
rn = "trace"
# Dimension issue
self.assert_wrong_dimensions(rn, self.cfunc_w_offset,
(np.ones(10, dtype=np.float64), 1), False)
self.assert_wrong_dimensions(rn, self.cfunc_no_offset,
(np.ones(10, dtype=np.float64),), False)
        # non-integer supplied as offset
self.assert_int_offset(
self.cfunc_w_offset, np.ones(
(2, 2)), offset=1.2)
def test_trace_w_optional_input(self):
"Issue 2314"
@jit("(optional(float64[:,:]),)", nopython=True)
def tested(a):
return np.trace(a)
a = np.ones((5, 5), dtype=np.float64)
tested(a)
with self.assertRaises(TypeError) as raises:
tested(None)
errmsg = str(raises.exception)
self.assertEqual('expected array(float64, 2d, A), got None', errmsg)
class TestBasics(TestLinalgSystems): # TestLinalgSystems for 1d test
order1 = cycle(['F', 'C', 'C', 'F'])
order2 = cycle(['C', 'F', 'C', 'F'])
# test: column vector, matrix, row vector, 1d sizes
# (7, 1, 3) and two scalars
sizes = [(7, 1), (3, 3), (1, 7), (7,), (1,), (3,), 3., 5.]
def _assert_wrong_dim(self, rn, cfunc):
# Dimension issue
self.assert_wrong_dimensions_1D(
rn, cfunc, (np.array([[[1]]], dtype=np.float64), np.ones(1)), False)
self.assert_wrong_dimensions_1D(
rn, cfunc, (np.ones(1), np.array([[[1]]], dtype=np.float64)), False)
def _gen_input(self, size, dtype, order):
if not isinstance(size, tuple):
return size
else:
if len(size) == 1:
return self.sample_vector(size[0], dtype)
else:
return self.sample_vector(
size[0] * size[1],
dtype).reshape(
size, order=order)
def _get_input(self, size1, size2, dtype):
a = self._gen_input(size1, dtype, next(self.order1))
b = self._gen_input(size2, dtype, next(self.order2))
# force domain consistency as underlying ufuncs require it
if np.iscomplexobj(a):
b = b + 1j
if np.iscomplexobj(b):
a = a + 1j
return (a, b)
def test_outer(self):
cfunc = jit(nopython=True)(outer_matrix)
def check(a, b, **kwargs):
# check without kwargs
expected = outer_matrix(a, b)
got = cfunc(a, b)
res = 5 * np.finfo(np.asarray(a).dtype).resolution
np.testing.assert_allclose(got, expected, rtol=res, atol=res)
# if kwargs present check with them too
if 'out' in kwargs:
got = cfunc(a, b, **kwargs)
np.testing.assert_allclose(got, expected, rtol=res,
atol=res)
np.testing.assert_allclose(kwargs['out'], expected,
rtol=res, atol=res)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, b, **kwargs)
dts = cycle(self.dtypes)
for size1, size2 in product(self.sizes, self.sizes):
dtype = next(dts)
(a, b) = self._get_input(size1, size2, dtype)
check(a, b)
c = np.empty((np.asarray(a).size, np.asarray(b).size),
dtype=np.asarray(a).dtype)
check(a, b, out=c)
self._assert_wrong_dim("outer", cfunc)
def test_kron(self):
cfunc = jit(nopython=True)(kron_matrix)
def check(a, b, **kwargs):
expected = kron_matrix(a, b)
got = cfunc(a, b)
res = 5 * np.finfo(np.asarray(a).dtype).resolution
np.testing.assert_allclose(got, expected, rtol=res, atol=res)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, b)
for size1, size2, dtype in \
product(self.sizes, self.sizes, self.dtypes):
(a, b) = self._get_input(size1, size2, dtype)
check(a, b)
self._assert_wrong_dim("kron", cfunc)
args = (np.empty(10)[::2], np.empty(10)[::2])
msg = "only supports 'C' or 'F' layout"
self.assert_error(cfunc, args, msg, err=errors.TypingError)
class TestHelpers(TestCase):
def test_copy_to_fortran_order(self):
from numba.np.linalg import _copy_to_fortran_order
def check(udt, expectfn, shapes, dtypes, orders):
for shape, dtype, order in product(shapes, dtypes, orders):
a = np.arange(np.prod(shape)).reshape(shape, order=order)
r = udt(a)
# check correct operation
self.assertPreciseEqual(expectfn(a), r)
# check new copy has made
self.assertNotEqual(a.ctypes.data, r.ctypes.data)
@njit
def direct_call(a):
return _copy_to_fortran_order(a)
shapes = [(3, 4), (3, 2, 5)]
dtypes = [np.intp]
orders = ['C', 'F']
check(direct_call, np.asfortranarray, shapes, dtypes, orders)
@njit
def slice_to_any(a):
# make a 'any' layout slice
sliced = a[::2][0]
return _copy_to_fortran_order(sliced)
shapes = [(3, 3, 4), (3, 3, 2, 5)]
dtypes = [np.intp]
orders = ['C', 'F']
def expected_slice_to_any(a):
# make a 'any' layout slice
sliced = a[::2][0]
return np.asfortranarray(sliced)
check(slice_to_any, expected_slice_to_any, shapes, dtypes, orders)
if __name__ == '__main__':
unittest.main()
|
79eab94b2ede02e16ca44abf4803d1a72747323f
|
b43e0384ec4bfacec2571a2bb41ce563267db449
|
/jesse/services/required_candles.py
|
99697e1e25307857c640dca34bcae65a90020477
|
[
"MIT"
] |
permissive
|
jesse-ai/jesse
|
55b73448b767492a20f8bc56c28306a1a24f8599
|
fadb03b5fcc06f0655c6a5d877435fb872a97c5e
|
refs/heads/master
| 2023-08-24T15:28:52.875208
| 2023-08-24T13:53:31
| 2023-08-24T13:53:31
| 156,847,937
| 5,259
| 722
|
MIT
| 2023-09-10T13:51:26
| 2018-11-09T10:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,742
|
py
|
required_candles.py
|
import arrow
import numpy as np
import jesse.helpers as jh
from jesse.config import config
from jesse.exceptions import CandleNotFoundInDatabase
from jesse.models import Candle
from jesse.services.cache import cache
from jesse.services.candle import generate_candle_from_one_minutes
from jesse.store import store
def load_required_candles(exchange: str, symbol: str, start_date_str: str, finish_date_str: str) -> np.ndarray:
"""
    loads the initial candles that are required before executing strategies:
    210 candles of the biggest timeframe, which means proportionally more for the smaller ones
"""
start_date = jh.arrow_to_timestamp(arrow.get(start_date_str, 'YYYY-MM-DD'))
finish_date = jh.arrow_to_timestamp(arrow.get(finish_date_str, 'YYYY-MM-DD')) - 60000
# validate
if start_date == finish_date:
raise ValueError('start_date and finish_date cannot be the same.')
if start_date > finish_date:
raise ValueError('start_date cannot be bigger than finish_date.')
if finish_date > arrow.utcnow().int_timestamp * 1000:
raise ValueError('Can\'t backtest the future!')
max_timeframe = jh.max_timeframe(config['app']['considering_timeframes'])
short_candles_count = jh.get_config('env.data.warmup_candles_num', 210) * jh.timeframe_to_one_minutes(max_timeframe)
pre_finish_date = start_date - 60_000
pre_start_date = pre_finish_date - short_candles_count * 60_000
# make sure starting from the beginning of the day instead
pre_start_date = jh.timestamp_to_arrow(pre_start_date).floor('day').int_timestamp * 1000
# update candles_count to count from the beginning of the day instead
short_candles_count = int((pre_finish_date - pre_start_date) / 60_000)
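    # Rough worked example (numbers are illustrative, not read from any config):
    # with warmup_candles_num=210 and a biggest timeframe of '4h' (240 minutes),
    # short_candles_count starts at 210 * 240 = 50_400 one-minute candles (~35 days);
    # the window is then extended backwards to the start of that day and the
    # count is recomputed from the adjusted pre_start_date.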
key = jh.key(exchange, symbol)
cache_key = f'{jh.timestamp_to_date(pre_start_date)}-{jh.timestamp_to_date(pre_finish_date)}-{key}'
cached_value = cache.get_value(cache_key)
# if cache exists
if cached_value:
candles_tuple = cached_value
    # not cached, so fetch from the database and cache it for later calls
else:
# fetch from database
candles_tuple = tuple(
Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
Candle.exchange == exchange,
Candle.symbol == symbol,
                (Candle.timeframe == '1m') | (Candle.timeframe.is_null()),
Candle.timestamp.between(pre_start_date, pre_finish_date)
).order_by(Candle.timestamp.asc()).tuples()
)
# cache it for near future calls
cache.set_value(cache_key, candles_tuple, expire_seconds=60 * 60 * 24 * 7)
candles = np.array(candles_tuple)
if len(candles) < short_candles_count + 1:
first_existing_candle = tuple(
Candle.select(Candle.timestamp).where(
Candle.exchange == exchange,
Candle.symbol == symbol,
                (Candle.timeframe == '1m') | (Candle.timeframe.is_null())
).order_by(Candle.timestamp.asc()).limit(1).tuples()
)
if not len(first_existing_candle):
raise CandleNotFoundInDatabase(
f'No candle for {exchange} {symbol} is present in the database. Try importing candles.'
)
first_existing_candle = first_existing_candle[0][0]
last_existing_candle = tuple(
Candle.select(Candle.timestamp).where(
Candle.exchange == exchange,
Candle.symbol == symbol,
                (Candle.timeframe == '1m') | (Candle.timeframe.is_null())
).order_by(Candle.timestamp.desc()).limit(1).tuples()
)[0][0]
first_backtestable_timestamp = first_existing_candle + (pre_finish_date - pre_start_date) + (60_000 * 1440)
# if first backtestable timestamp is in the future, that means we have some but not enough candles
if first_backtestable_timestamp > jh.today_to_timestamp():
raise CandleNotFoundInDatabase(
f'Not enough candle for {exchange} {symbol} is present in the database. Jesse requires "210 * biggest_timeframe" warm-up candles. '
'Try importing more candles from an earlier date.'
)
raise CandleNotFoundInDatabase(
f'Not enough candles for {exchange} {symbol} exists to run backtest from {start_date_str} => {finish_date_str}. \n'
f'Are you considering the warmup candles? For more info please read:\n https://jesse.trade/help/faq/i-imported-candles-but-keep-getting-not-enough-candles'
)
return candles
def inject_required_candles_to_store(candles: np.ndarray, exchange: str, symbol: str) -> None:
"""
generate and add required candles to the candle store
"""
# batch add 1m candles:
store.candles.batch_add_candle(candles, exchange, symbol, '1m', with_generation=False)
# loop to generate, and add candles (without execution)
for i in range(len(candles)):
for timeframe in config['app']['considering_timeframes']:
# skip 1m. already added
if timeframe == '1m':
continue
num = jh.timeframe_to_one_minutes(timeframe)
if (i + 1) % num == 0:
generated_candle = generate_candle_from_one_minutes(
timeframe,
candles[(i - (num - 1)):(i + 1)],
True
)
store.candles.add_candle(
generated_candle,
exchange,
symbol,
timeframe,
with_execution=False,
with_generation=False
)
|
9f7587877b46341f9b4f33c71aae525a3beb323a
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/GoogleVertexAI/Integrations/GoogleVertexAI/GoogleVertexAI.py
|
197682d40b75c6cb4cf2327b4effe8ff917c88b9
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 11,247
|
py
|
GoogleVertexAI.py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
import json
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
''' GLOBAL Variables '''
DISABLE_SSL = demisto.params().get('insecure', False)
PROXY = demisto.params().get('proxy')
PROMPT = demisto.params().get('prompt')
SERVICE_SCOPES = "https://www.googleapis.com/auth/cloud-platform"
REDIRECT_URI = 'https://oproxy.demisto.ninja/authcode'
AUTH_URL = 'https://accounts.google.com/o/oauth2/auth'
ACCESS_TOKEN_URL = 'https://oauth2.googleapis.com/token'
PROJECT_ID = demisto.params().get('ProjectID')
URL = 'https://us-central1-aiplatform.googleapis.com/v1/projects/'
AI_Model = 'chat-bison:predict'
TOKEN = demisto.params().get('token')
CLIENT_ID = demisto.params().get('ID')
CLIENT_SECRET = demisto.params().get('Secret')
AUTH_CODE = demisto.params().get('Authentication_Code')
ERROR_MSG = ("ERROR: The authentication code has been reset"
"Please reset integration cache for Vetex AI Instance"
"in XSOAR and regenerate the 'Authorization code'")
''' CLIENT CLASS '''
class Client(BaseClient):
"""
Client class to interact with Google Vertex AI API
"""
def __init__(self, token_str: str, base_url: str, proxy: bool, verify: bool):
super().__init__(base_url=URL, proxy=PROXY, verify=verify)
self.token_str = token_str
self.base_url = base_url
self.proxy = proxy
self.headers = {'Authorization': f"Bearer {self.token_str}", "Content-Type": "application/json"}
def PaLMModel(self, prompt: str):
options = {"instances": [{"messages": [{"content": prompt}]}]}
return self._http_request(method='POST',
url_suffix=f'{PROJECT_ID}/locations/us-central1/publishers/google/models/{AI_Model}',
json_data=options, headers=self.headers)
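    # A successful predict call returns JSON that PaLM_output() below unpacks as
    # response['predictions'][0]['candidates'][0]['content'], i.e. roughly:
    #   {"predictions": [{"candidates": [{"content": "<model reply>", ...}], ...}]}
    # (shape inferred from the parsing code in this file, not a complete
    # description of the Vertex AI response).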
''' MAIN FUNCTIONS '''
def createAuthorizationURL():
# The client ID and access scopes are required.
partOne = f"{AUTH_URL}/oauthchooseaccount?scope={SERVICE_SCOPES}&access_type=offline&prompt=consent"
partTwo = f"&response_type=code&state=state_parameter_passthrough_value&redirect_uri={REDIRECT_URI}&client_id={CLIENT_ID}"
authorization_url = partOne + partTwo
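    # For illustration only (CLIENT_ID shown as a placeholder), the generated URL
    # looks like:
    #   https://accounts.google.com/o/oauth2/auth/oauthchooseaccount?scope=https://www.googleapis.com/auth/cloud-platform
    #   &access_type=offline&prompt=consent&response_type=code&state=state_parameter_passthrough_value
    #   &redirect_uri=https://oproxy.demisto.ninja/authcode&client_id=<CLIENT_ID>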
return authorization_url
def check_access_token_validation():
"""
    Access tokens expire in 1 hour; once expired, refresh_access_token() is used to request a new one
"""
demisto.debug("Start Token Validation")
integration_context: dict = get_integration_context()
access_token: str = integration_context.get('access_token', '')
    valid_until: int = integration_context.get('valid_until', 0)  # default to 0 when no expiry has been cached yet
time_now = epoch_seconds()
if access_token and (time_now < valid_until):
demisto.debug("Access Token still valid")
return access_token
elif access_token and (time_now > valid_until):
demisto.debug("Access Token is expired, using refresh token")
access_token = refresh_access_token()
return access_token
else:
access_token = get_access_token()
return access_token
def get_access_token():
"""
Generate a new Access Token using ClientID, ClientSecret and configured Authentication Code
"""
demisto.debug("Generate a new access token")
integration_context: dict = get_integration_context()
data: dict = {
'code': AUTH_CODE,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'redirect_uri': REDIRECT_URI,
'grant_type': 'authorization_code'
}
response: requests.Response = requests.post(
ACCESS_TOKEN_URL,
data=data,
verify=DISABLE_SSL
)
if not response.ok:
error = error_parser(response)
raise ValueError(f'Failed to get access token [{response.status_code}] - {error}')
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
refresh_token = response_json.get('refresh_token', '')
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['refresh_token'] = refresh_token
integration_context['access_token'] = access_token
integration_context['valid_until'] = time_now + expires_in
set_integration_context(integration_context)
return access_token
def refresh_access_token():
"""
A refresh token might stop working for one of these reasons:
The user has revoked your app's access. The refresh token has not been used for six months
https://developers.google.com/identity/protocols/oauth2#:~:text=Refresh%20token%20expiration,
-You%20must%20write&text=A%20refresh%20token%20might%20stop,been%20used%20for%20six%20months.
"""
demisto.debug("Refresh Access token using refresh_token from integration_context")
integration_context: dict = get_integration_context()
refresh_token: str = integration_context.get('refresh_token', '')
data: dict = {
'refresh_token': refresh_token,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'refresh_token'
}
response: requests.Response = requests.post(
ACCESS_TOKEN_URL,
data=data,
verify=DISABLE_SSL
)
if not response.ok:
error = error_parser(response)
raise ValueError(f'Failed to get refresh token [{response.status_code}] - {error}')
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['refresh_token'] = refresh_token
integration_context['access_token'] = access_token
integration_context['valid_until'] = time_now + expires_in
set_integration_context(integration_context)
return access_token
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for given date. If no date, return current.
:param d: timestamp datetime object
:return: timestamp in epoch
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def resetIntegrationContext():
"""
    In case of an error related to authentication or authorization, the cached context will be reset
"""
demisto.debug(ERROR_MSG)
integration_context: dict = get_integration_context()
integration_context['refresh_token'] = ""
integration_context['access_token'] = ""
integration_context['valid_until'] = ""
set_integration_context(integration_context)
return True
def error_parser(resp_err: requests.Response) -> str:
"""
Parse Error
"""
try:
response: dict = resp_err.json()
if "Unauthorized" in response.get('error_description', ''):
resetIntegrationContext()
raise ValueError(ERROR_MSG)
elif "invalid authentication credentials" in response.get('error_description', ''):
resetIntegrationContext()
raise ValueError(ERROR_MSG)
elif "Bad" in response.get('error_description', ''):
resetIntegrationContext()
raise ValueError(ERROR_MSG)
else:
error = response.get('error', {})
err_str = (f"{error.get('code', '')}: {error.get('message', '')}" if isinstance(error, dict)
else response.get('error_description', ''))
if err_str:
demisto.debug(err_str)
return err_str
# If no error message
raise ValueError()
except ValueError:
return resp_err.text
def test_module(client: Client):
"""
This is the call made when pressing the integration test button.
"""
promptText = "Hi, what is your name"
status = ''
try:
response = client.PaLMModel(promptText)
rep = json.dumps(response)
repJSON = json.loads(rep)
PaLMResp = repJSON.get('predictions', [])[0].get('candidates', [])[0].get('content', "")
if PaLMResp:
status = 'ok'
return status
else:
status = ("There is an error in communciating with Google Vertex AI API"
"- Please regenerate the Authentication Code again")
except Exception as e:
exception_text = str(e).lower()
        if 'bad request' in exception_text or 'invalid_grant' in exception_text:  # exception_text is lowercased above
status = ERROR_MSG
return status
else:
raise e
return status
def send_prompts_PaLM_command(client: Client, prompt: str) -> CommandResults:
"""
    Send the prompt text to the Vertex AI PaLM chat model
"""
PaLM_response = client.PaLMModel(prompt)
return PaLM_output(PaLM_response)
def PaLM_output(response) -> CommandResults:
"""
    Convert the response from the PaLM model to a human-readable markdown table
    :return: CommandResults containing the PaLM model response
:rtype: ``CommandResults``
"""
if response and isinstance(response, dict):
rep = json.dumps(response)
repJSON = json.loads(rep)
PaLMResp = repJSON.get('predictions', [])[0].get('candidates', [])[0].get('content', "")
context = [{'PaLM Model Response': PaLMResp}]
markdown = tableToMarkdown(
'Google Vertex AI API Response',
context,
)
results = CommandResults(
readable_output=markdown,
outputs_prefix='GoogleVertexAIResponse',
outputs_key_field='predictions',
outputs=context
)
return results
else:
raise DemistoException('Error in results')
''' MAIN FUNCTION '''
def main():
"""
Main function, runs command functions
"""
params = demisto.params()
args = demisto.args()
command = demisto.command()
verify = not params.get('insecure', False)
try:
if command == 'test-module':
access_token = check_access_token_validation()
client = Client(token_str=access_token, base_url=URL, verify=verify, proxy=PROXY)
return_results(test_module(client))
elif command == 'google-vertex-PaLM-chat':
access_token = check_access_token_validation()
client = Client(token_str=access_token, base_url=URL, verify=verify, proxy=PROXY)
return_results(send_prompts_PaLM_command(client, **args))
elif command == 'google-vertex-ai-generate-auth-url':
return_results(createAuthorizationURL())
except Exception as e:
if 'Quota exceeded for quota metric' in str(e):
return_error('Quota for Google Vertex API exceeded')
else:
return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
|
846ec1a85356ab52f29cc8e82e021e778a585177
|
07926048f900b4d9e88b45670f955a319ed758c5
|
/mpc/env_dx/cartpole.py
|
43556cc9af68b406f6a661d2427d76c48f9fd50a
|
[
"MIT"
] |
permissive
|
locuslab/mpc.pytorch
|
8f8f3a4323247d58609b3bffe8402208fd0f8a64
|
63732fa85ab2a151045493c4e67653210ca3d7ff
|
refs/heads/master
| 2022-11-20T12:27:48.708670
| 2022-11-11T00:39:47
| 2022-11-11T00:39:47
| 151,274,974
| 731
| 136
|
MIT
| 2022-07-17T21:42:12
| 2018-10-02T15:09:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,439
|
py
|
cartpole.py
|
#!/usr/bin/env python3
import torch
from torch.autograd import Function, Variable
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
import numpy as np
from mpc import util
import os
import shutil
FFMPEG_BIN = shutil.which('ffmpeg')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
# import sys
# from IPython.core import ultratb
# sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux', call_pdb=1)
class CartpoleDx(nn.Module):
def __init__(self, params=None):
super().__init__()
self.n_state = 5
self.n_ctrl = 1
# model parameters
if params is None:
# gravity, masscart, masspole, length
self.params = Variable(torch.Tensor((9.8, 1.0, 0.1, 0.5)))
else:
self.params = params
assert len(self.params) == 4
self.force_mag = 100.
        self.theta_threshold_radians = np.pi  # 12 * 2 * np.pi / 360
self.x_threshold = 2.4
self.max_velocity = 10
self.dt = 0.05
self.lower = -self.force_mag
self.upper = self.force_mag
# 0 1 2 3 4
# x dx cos(th) sin(th) dth
self.goal_state = torch.Tensor( [ 0., 0., 1., 0., 0.])
self.goal_weights = torch.Tensor([0.1, 0.1, 1., 1., 0.1])
self.ctrl_penalty = 0.001
self.mpc_eps = 1e-4
self.linesearch_decay = 0.5
self.max_linesearch_iter = 2
def forward(self, state, u):
squeeze = state.ndimension() == 1
if squeeze:
state = state.unsqueeze(0)
u = u.unsqueeze(0)
if state.is_cuda and not self.params.is_cuda:
self.params = self.params.cuda()
gravity, masscart, masspole, length = torch.unbind(self.params)
total_mass = masspole + masscart
polemass_length = masspole * length
u = torch.clamp(u[:,0], -self.force_mag, self.force_mag)
x, dx, cos_th, sin_th, dth = torch.unbind(state, dim=1)
th = torch.atan2(sin_th, cos_th)
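        # The three expressions below follow the classic cart-pole dynamics used
        # in Barto/Sutton-style derivations, with th measured from the upright
        # position:
        #   temp   = (F + m_p*l*dth^2*sin(th)) / (m_c + m_p)
        #   th_acc = (g*sin(th) - cos(th)*temp) / (l*(4/3 - m_p*cos(th)^2/(m_c + m_p)))
        #   x_acc  = temp - m_p*l*th_acc*cos(th) / (m_c + m_p)
        # followed by an explicit Euler step of size dt.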
cart_in = (u + polemass_length * dth**2 * sin_th) / total_mass
th_acc = (gravity * sin_th - cos_th * cart_in) / \
(length * (4./3. - masspole * cos_th**2 /
total_mass))
xacc = cart_in - polemass_length * th_acc * cos_th / total_mass
x = x + self.dt * dx
dx = dx + self.dt * xacc
th = th + self.dt * dth
dth = dth + self.dt * th_acc
state = torch.stack((
x, dx, torch.cos(th), torch.sin(th), dth
), 1)
return state
def get_frame(self, state, ax=None):
state = util.get_data_maybe(state.view(-1))
assert len(state) == 5
x, dx, cos_th, sin_th, dth = torch.unbind(state)
gravity, masscart, masspole, length = torch.unbind(self.params)
th = np.arctan2(sin_th, cos_th)
th_x = sin_th*length
th_y = cos_th*length
if ax is None:
fig, ax = plt.subplots(figsize=(6,6))
else:
fig = ax.get_figure()
ax.plot((x,x+th_x), (0, th_y), color='k')
ax.set_xlim((-length*2, length*2))
ax.set_ylim((-length*2, length*2))
return fig, ax
def get_true_obj(self):
q = torch.cat((
self.goal_weights,
self.ctrl_penalty*torch.ones(self.n_ctrl)
))
assert not hasattr(self, 'mpc_lin')
px = -torch.sqrt(self.goal_weights)*self.goal_state #+ self.mpc_lin
p = torch.cat((px, torch.zeros(self.n_ctrl)))
return Variable(q), Variable(p)
if __name__ == '__main__':
dx = CartpoleDx()
n_batch, T = 8, 50
u = torch.zeros(T, n_batch, dx.n_ctrl)
xinit = torch.zeros(n_batch, dx.n_state)
th = 1.
xinit[:,2] = np.cos(th)
xinit[:,3] = np.sin(th)
x = xinit
for t in range(T):
x = dx(x, u[t])
fig, ax = dx.get_frame(x[0])
fig.savefig('{:03d}.png'.format(t))
plt.close(fig)
vid_file = 'cartpole_vid.mp4'
if os.path.exists(vid_file):
os.remove(vid_file)
cmd = ('{} -loglevel quiet '
'-r 32 -f image2 -i %03d.png -vcodec '
'libx264 -crf 25 -pix_fmt yuv420p {}').format(
FFMPEG_BIN,
vid_file
)
os.system(cmd)
for t in range(T):
os.remove('{:03d}.png'.format(t))
|
8126a3ce35b2a7c23f9b892da4cbbf3aee8d1c98
|
3e8c37a02052850b06a33b4eb310d51989d72b91
|
/examples/morse_code.py
|
6f349d83083d5addaf961fc3419778a6fea483fc
|
[
"MIT"
] |
permissive
|
pimoroni/blinkt
|
8a4c62a90dac968defe62d4c7fc8cf42b1d5d721
|
27a7dfcd267261a56860ed1f77af7318a31e5488
|
refs/heads/master
| 2023-09-02T02:47:43.476849
| 2022-02-09T22:17:53
| 2022-02-09T22:17:53
| 61,641,200
| 330
| 144
|
MIT
| 2023-02-28T17:39:01
| 2016-06-21T14:38:02
|
C
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
morse_code.py
|
#!/usr/bin/env python
import time
import blinkt
blinkt.set_clear_on_exit()
def show_all(state):
"""Set all LEDs."""
for i in range(blinkt.NUM_PIXELS):
val = state * 255
blinkt.set_pixel(i, val, val, val)
blinkt.show()
def dot():
"""Blink LEDs for 0.05 seconds."""
show_all(1)
time.sleep(0.05)
show_all(0)
time.sleep(0.2)
def dash():
"""Blink LEDs for 0.2 seconds."""
show_all(1)
time.sleep(0.2)
show_all(0)
time.sleep(0.2)
def space():
"""Delay for 0.02 seconds."""
time.sleep(0.2)
# ' ' is a space, '.' is a dot and '-' is a dash
MORSE = ' -... .. . -.. - -. . - . - -. -. - '
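# For example, "SOS" would be written in this scheme as '... --- ... '
# (dots and dashes within a letter, letters separated by spaces).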
while True:
for m in MORSE:
if m == ' ':
space()
elif m == '.':
dot()
elif m == '-':
dash()
|
9bd3b489f5453b1e3f951d2f7aea1395e416595d
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/common/blueprints/__init__.py
|
3844b8b23861a6f12793840806a7231102f35fc2
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,292
|
py
|
__init__.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/blueprints/__init__.py
import typing
import nations
from constants import IS_CLIENT
from debug_utils import LOG_CURRENT_EXCEPTION
from soft_exception import SoftException
from wotdecorators import singleton
from dossiers2.custom.cache import getCache as getHelperCache
_CONFIG_FILE = 'scripts/server_xml/blueprints.xml'
class BlueprintsException(SoftException):
pass
def wipe(blueprints_cfg, pdata, leaveGold):
pdata['blueprints'] = {}
def makeDefaults():
return {'isEnabled': False,
'useBlueprintsForUnlock': False,
'allowBlueprintsConversion': False,
'levels': {}}
def readConfig(verbose, **overrides):
if IS_CLIENT:
return makeDefaults()
import XmlConfigReader
reader = XmlConfigReader.makeReader(_CONFIG_FILE, '', verbose)
result = _readBlueprints(reader, 'blueprints')
    for k in result:
        if k not in overrides:
            continue
        if k in ('isEnabled', 'useBlueprintsForUnlock', 'allowBlueprintsConversion'):
            # boolean feature flags are AND-ed with the override value
            result[k] &= overrides[k]
        else:
            # all other settings are replaced outright by the override
            result[k] = overrides[k]
return result
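# A hypothetical sketch of the blueprints.xml layout expected by _readBlueprints below
# (tag names follow the reader code; the values shown are illustrative only):
#
# <blueprints>
#     <isEnabled>true</isEnabled>
#     <useBlueprintsForUnlock>true</useBlueprintsForUnlock>
#     <allowBlueprintsConversion>false</allowBlueprintsConversion>
#     <levels>
#         <level_2>
#             <parts>4</parts>
#             <progress>0.5</progress>
#             <requires>1 1</requires>
#             <decays>0.9 0.9</decays>
#             <allyConversionCoefs> ... </allyConversionCoefs>
#         </level_2>
#     </levels>
# </blueprints>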
def _readBlueprints(reader, subsectionName):
section = reader.getSubsection(subsectionName)
if section is None:
return {}
else:
isEnabled = section.readBool('isEnabled', False)
useBlueprintsForUnlock = section.readBool('useBlueprintsForUnlock', False)
allowBlueprintsConversion = section.readBool('allowBlueprintsConversion', False)
levels = {}
levelsSubsection = reader.getSubsection(subsectionName + '/levels')
for lname, lsection in levelsSubsection.items():
_, lvl = str(lname).split('_', 1)
parts = lsection.readInt('parts', 0)
progress = lsection.readFloat('progress', 0)
requires = tuple((int(i) for i in lsection.readString('requires').split())) or (0, 0)
decays = tuple((float(i) for i in lsection.readString('decays').split())) or (0, 0)
allyConversionCoef = _readConversionCoefs(lsection, 'allyConversionCoefs')
levels[int(lvl)] = (parts,
progress,
requires,
decays,
allyConversionCoef)
return {'isEnabled': isEnabled,
'useBlueprintsForUnlock': useBlueprintsForUnlock,
'allowBlueprintsConversion': allowBlueprintsConversion,
'levels': levels}
def _readConversionCoefs(section, subsectionName):
result = {}
for allianceName, groupSection in section[subsectionName].items():
result[nations.ALLIANCE_IDS[allianceName]] = group = dict()
for nationName, _ in groupSection.items():
group[nations.INDICES[nationName]] = groupSection.readFloat(nationName, 1)
return result
@singleton
class g_cache(object):
def __init__(self):
self.__cfg = makeDefaults()
def __getattr__(self, attr):
try:
return self.__cfg[attr]
except KeyError:
raise AttributeError
def init(self, gameParams=None, nofail=True):
cfg = self.__cfg
try:
if gameParams is not None:
blueprints = gameParams['blueprints_config'].settings
else:
blueprints = readConfig(True)
cfg.update(blueprints)
except Exception:
self.fini()
if nofail:
raise
LOG_CURRENT_EXCEPTION()
return
def fini(self):
self.__cfg.clear()
def __nonzero__(self):
return bool(self.__cfg)
def init(gameParams=None, nofail=True):
g_cache.init(gameParams=gameParams, nofail=nofail)
def getAllResearchedVehicles(defaultUnlocks=frozenset()):
return getHelperCache()['vehiclesInTrees'] - defaultUnlocks
def getResearchableVehiclesWithout1Lvl(defaultUnlocks=frozenset()):
return getHelperCache()['vehiclesInTreesWithout1Lvl'] - defaultUnlocks
def getUnlockedVehicles(defaultUnlocks=frozenset()):
return getHelperCache()['vehiclesInTrees'] & defaultUnlocks
def isNationResearched(nationID, defaultUnlocks=frozenset(), unlocks=frozenset()):
return not bool(getHelperCache()['vehiclesInTreesByNation'][nationID] - defaultUnlocks - unlocks)
|
a9d18f7be52c482425ba8095a99ecc5e03001fdb
|
dfd9b83c93395e75d92be8d9ff5175174dafdd82
|
/Server/integrations/basic.client_group/BasicClientGroupExternalAuthenticator.py
|
cbbd74ac6a4bc4601342997a23f696c8043e0e20
|
[
"MIT"
] |
permissive
|
GluuFederation/oxAuth
|
1ebc5775da8762db1e59a7cbb0769b6fc3cf21fc
|
0933f5ef56fc78d65cbec6e4a4310ffa3d175cec
|
refs/heads/master
| 2023-08-23T16:09:14.123800
| 2023-08-01T07:39:01
| 2023-08-01T07:39:01
| 18,150,497
| 425
| 187
|
MIT
| 2023-09-07T11:04:16
| 2014-03-26T19:14:35
|
Java
|
UTF-8
|
Python
| false
| false
| 6,858
|
py
|
BasicClientGroupExternalAuthenticator.py
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2019, Gluu
#
# Author: Yuriy Movchan
#
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import UserService, AuthenticationService, AppInitializer
from org.gluu.util import StringHelper
from java.util import Arrays, HashMap
import java
import json
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print "Basic (client group). Initialization"
self.allow_default_login = False
if configurationAttributes.containsKey("allow_default_login"):
self.allow_default_login = StringHelper.toBoolean(configurationAttributes.get("allow_default_login").getValue2(), False)
if not configurationAttributes.containsKey("configuration_file"):
print "Basic (client group). The property configuration_file is empty"
return False
configurationFilePath = configurationAttributes.get("configuration_file").getValue2()
self.client_configurations = self.loadClientConfigurations(configurationFilePath)
if self.client_configurations == None:
print "Basic (client group). File with client configuration should be not empty"
return False
print "Basic (client group). Initialized successfully"
return True
def destroy(self, clientConfiguration):
print "Basic (client group). Destroy"
print "Basic (client group). Destroyed successfully"
return True
def getApiVersion(self):
return 11
def getAuthenticationMethodClaims(self, requestParameters):
return None
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
authenticationService = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
session_attributes = identity.getSessionId().getSessionAttributes()
client_id = session_attributes.get("client_id")
print "Basic (client group). Get client_id: '%s' authorization request" % client_id
user_groups = self.client_configurations.get(client_id)
if user_groups == None:
print "Basic (client group). There is no user groups configuration for client_id '%s'. allow_default_login: %s" % (client_id, self.allow_default_login)
if not self.allow_default_login:
return False
result = self.authenticateImpl(credentials, authenticationService)
return result
is_member_client_groups = self.isUserMemberOfGroups(credentials, user_groups)
if not is_member_client_groups:
print "Basic (client group). User '%s' hasn't permissions to log into client_id '%s' application. " % (credentials.getUsername(), client_id)
return False
result = self.authenticateImpl(credentials, authenticationService)
return result
def prepareForStep(self, configurationAttributes, requestParameters, step):
if step == 1:
print "Basic (client group). Prepare for Step 1"
return True
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
return None
def getCountAuthenticationSteps(self, configurationAttributes):
return 1
def getPageForStep(self, configurationAttributes, step):
return ""
def logout(self, configurationAttributes, requestParameters):
return True
def authenticateImpl(self, credentials, authenticationService):
print "Basic (client group). Processing user name/password authentication"
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
logged_in = authenticationService.authenticate(user_name, user_password)
if not logged_in:
return False
return True
def loadClientConfigurations(self, configurationFile):
clientConfiguration = None
# Load configuration from file
f = open(configurationFile, 'r')
try:
configurationFileJson = json.loads(f.read())
except:
print "Basic (client group). Load configuration from file. Failed to load authentication configuration from file:", configurationFile
return None
finally:
f.close()
clientConfigurations = HashMap()
for client_key in configurationFileJson.keys():
client_config = configurationFileJson[client_key]
client_inum = client_config["client_inum"]
user_groups_array = client_config["user_group"]
user_groups = Arrays.asList(user_groups_array)
clientConfigurations.put(client_inum, user_groups)
print "Basic (client group). Load configuration from file. Loaded '%s' configurations" % clientConfigurations.size()
print clientConfigurations
return clientConfigurations
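    # A hypothetical example of the configuration_file contents parsed above
    # (the inum and group DN values are placeholders, not real identifiers):
    #
    # {
    #     "client_1": {
    #         "client_inum": "@!0000.1111.2222.3333",
    #         "user_group": ["inum=AAAA,ou=groups,o=gluu"]
    #     }
    # }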
def isUserMemberOfGroups(self, credentials, groups):
userService = CdiUtil.bean(UserService)
user_name = credentials.getUsername()
if StringHelper.isEmptyString(user_name):
return False
find_user_by_uid = userService.getUser(user_name)
is_member = False
member_of_list = find_user_by_uid.getAttributeValues("memberOf")
if member_of_list == None:
return is_member
print member_of_list
print groups
for member_of in member_of_list:
for group in groups:
if StringHelper.equalsIgnoreCase(group, member_of) or member_of.endswith(group):
is_member = True
break
return is_member
def getNextStep(self, configurationAttributes, requestParameters, step):
return -1
def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
print "Get external logout URL call"
return None
|
a7d493e0e6631ae4b4e251b0b915f5d93ede1194
|
94a3a36c80ba3b69fae8465a5481bc7eef620ea7
|
/train_lemmatizer.py
|
b7f734949b1d5195f07068b508cd02e9450a9d30
|
[
"Apache-2.0"
] |
permissive
|
TurkuNLP/Turku-neural-parser-pipeline
|
6ebf113a0102a3f6e05ee8e0734a9ed3e8dc56f5
|
a3af15b321b32b0373a67b982f9a3e6c578a43f2
|
refs/heads/master
| 2022-10-05T09:22:11.493033
| 2022-09-09T11:52:54
| 2022-09-09T11:52:54
| 132,604,829
| 110
| 39
|
Apache-2.0
| 2021-10-11T13:50:52
| 2018-05-08T12:21:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,136
|
py
|
train_lemmatizer.py
|
"""
A bit experimental script for training new models for lemmatization
"""
import os
import sys
import glob
from shutil import copyfile, rmtree
import re
from distutils.util import strtobool
from tnparser.lemmatizer_mod import Lemmatizer, read_conllu
ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10)
thisdir=os.path.dirname(os.path.realpath(__file__))
def numeric_sort(x):
r = re.compile('(\d+)')
l = r.split(x)
return [int(y) if y.isdigit() else y for y in l]
def copy_lemmatizer(args):
    files = glob.glob(f"{args.tmp}/lemmatizer_step_*.pt")
latest=sorted(files, key =numeric_sort)[-1]
copyfile(latest, f"{args.tmp}/lemmatizer.pt")
def create_dataset(fname):
lemmatizer = Lemmatizer()
data=[]
with open(fname, "rt", encoding="utf-8") as f:
for comm, sent in read_conllu(f):
for token in sent:
word, lemma = lemmatizer.transform_token(token)
data.append((word, lemma))
return data
def print_tmp(train, devel, path):
with open(os.path.join(path, "train.input"), "wt", encoding="utf-8") as inpf, open(os.path.join(path, "train.output"), "wt", encoding="utf-8") as outf:
for (word, lemma) in train:
print(word, file=inpf)
print(lemma, file=outf)
with open(os.path.join(path, "devel.input"), "wt", encoding="utf-8") as inpf, open(os.path.join(path, "devel.output"), "wt", encoding="utf-8") as outf:
for (word, lemma) in devel:
print(word, file=inpf)
print(lemma, file=outf)
def train_model(args):
# data
preprocess = f"onmt_preprocess -train_src {args.tmp}/train.input -train_tgt {args.tmp}/train.output -valid_src {args.tmp}/devel.input -valid_tgt {args.tmp}/devel.output -save_data {args.tmp}/preprocessed-data -src_words_min_frequency {args.min_char_freq} -tgt_words_min_frequency {args.min_char_freq} -overwrite"
print("", preprocess, "", sep="\n", file=sys.stderr)
status = os.system(preprocess)
if status != 0:
print("Lemmatizer status:", status, "Preprocessing failed.", file=sys.stderr)
sys.exit()
# train model
cuda=""
gpu_ranks=""
if args.gpu != -1:
cuda = f"CUDA_VISIBLE_DEVICES={args.gpu}"
gpu_ranks = f"-gpu_ranks {args.gpu if args.gpu < 1 else 0} "
train = f"{cuda} onmt_train -data {args.tmp}/preprocessed-data -save_model {args.tmp}/lemmatizer -learning_rate {args.lr} -batch_size {args.batch_size} -optim {args.optimizer} -learning_rate_decay {args.learning_rate_decay} -dropout {args.dropout} -encoder_type brnn -train_steps {args.train_steps} -save_checkpoint_steps {args.save_every_steps} -valid_steps {args.save_every_steps} {gpu_ranks}"
print("", train, "", sep="\n", file=sys.stderr)
status = os.system(train)
if status != 0:
print("Lemmatizer status:", status, "Training failed.", file=sys.stderr)
sys.exit()
copy_lemmatizer(args) # copy the latest lemmatizer under correct name
print("Building lemma cache...", file=sys.stderr)
status = os.system(f"cat {args.train_file} | python3 {thisdir}/build_lemma_cache.py > {args.tmp}/lemma_cache.tsv") # build lemma cache
if status != 0:
print("Lemma cache status:", status, "Training failed.", file=sys.stderr)
sys.exit()
print("Training done", file=sys.stderr)
def train(args):
lemmatizer = Lemmatizer()
train_data = create_dataset(args.train_file)
devel_data = create_dataset(args.devel_file)
print_tmp(train_data, devel_data, args.tmp)
train_model(args)
if __name__=="__main__":
import argparse
argparser = argparse.ArgumentParser(description='A script for training a lemmatizer')
argparser.add_argument('--name', default="lemmatizer.pt", help='Model name')
argparser.add_argument('--train_file', type=str, required=True, help='Training data file (conllu)')
argparser.add_argument('--devel_file', type=str, required=True, help='Development data file (conllu)')
argparser.add_argument('--tmp', type=str, default="tmp", help='Directory to place temporary files (default: tmp)')
argparser.add_argument('--min_char_freq', type=int, default=5, help='Minimum character frequency to keep, rest will be replaced with unknown (default: 5)')
argparser.add_argument('--gpu', type=int, default=0, help='GPU device, use -1 for CPU (default: 0)')
argparser.add_argument('--dropout', type=float, default=0.3, help='Dropout (default: 0.3)')
argparser.add_argument('--optimizer', type=str, default="adam", help='Optimizer (adam/sgd, default: adam)')
argparser.add_argument('--lr', type=float, default=0.0005, help='Learning rate (default: 0.0005)')
argparser.add_argument('--learning_rate_decay', type=float, default=0.9, help='Learning rate decay (default: 0.9)')
argparser.add_argument('--batch_size', type=int, default=64, help='Batch size (default: 64)')
argparser.add_argument('--train_steps', type=int, default=10000, help='Train N steps (default: 10,000)')
argparser.add_argument('--save_every_steps', type=int, default=1000, help='Save every N steps (default: 1000)')
args = argparser.parse_args()
print(args, file=sys.stderr)
try:
if os.path.isdir(args.tmp):
            input(f'\nTemporary directory {args.tmp} already exists, old files will be deleted. Press <Enter> to continue or <Ctrl-c> to abort.\n')
files=[]
for dirpath, dirnames, filenames in os.walk(args.tmp):
for fname in filenames:
files.append(os.path.join(dirpath, fname))
input('\nDeleting {files}. Press <Enter> to continue or <Ctrl-c> to abort.\n'.format(files=", ".join(files)))
for f in files:
print("Deleting",f,file=sys.stderr)
os.remove(f)
else:
os.mkdir(args.tmp)
except KeyboardInterrupt:
print(file=sys.stderr)
sys.exit(0)
train(args)
|
2f11ae72ae6ba8ce252520c5b75403f1f147af19
|
1016407a00917cde044e37e7323458f4e30daf73
|
/tests/parser/test_matrix.py
|
7339d65d77dcfb9cdfeb4a56b3a2e28b54973fdb
|
[
"MIT"
] |
permissive
|
moshi4/pyCirclize
|
47970cc4b4bdf717794a03d2caa0b0fe62dbf5e2
|
90e78d996b02af50fda8082310ca784586f7b82c
|
refs/heads/main
| 2023-08-04T09:38:07.924711
| 2023-07-20T10:30:49
| 2023-07-20T10:30:49
| 579,313,517
| 410
| 18
|
MIT
| 2023-08-23T12:38:12
| 2022-12-17T09:21:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,351
|
py
|
test_matrix.py
|
from pathlib import Path
import pandas as pd
from pycirclize.parser import Matrix
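# Note: matrix_df, fromto_table_df, tsv_matrix_file and csv_matrix_file below are
# pytest fixtures, presumably provided by a conftest.py that is not shown here.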
def test_load_dataframe_matrix(matrix_df: pd.DataFrame):
"""Test load panda dataframe matrix"""
# Load pandas matrix dataframe
matrix = Matrix(matrix_df)
# Test row & column names
row_names = ["S1", "S2", "S3"]
col_names = ["E1", "E2", "E3", "E4", "E5", "E6"]
assert matrix.all_names == row_names + col_names
assert matrix.row_names == row_names
assert matrix.col_names == col_names
# Only test successfully call function
matrix.to_sectors()
matrix.to_links()
def test_parse_fromto_table(fromto_table_df: pd.DataFrame):
"""Test parse from-to table"""
# Parse from-to table dataframe
matrix = Matrix.parse_fromto_table(fromto_table_df)
# Test row & column names
expected_names = list("ABCDEFG")
assert matrix.all_names == expected_names
assert matrix.row_names == expected_names
assert matrix.col_names == expected_names
# Only test successfully call function
matrix.to_sectors()
matrix.to_links()
def test_load_tsv_matrix(tsv_matrix_file: Path):
"""Test load tsv matrix"""
# Load tsv format matrix file
matrix = Matrix(tsv_matrix_file)
# Test row & column names
row_names = ["S1", "S2", "S3"]
col_names = ["E1", "E2", "E3", "E4", "E5", "E6"]
assert matrix.all_names == row_names + col_names
assert matrix.row_names == row_names
assert matrix.col_names == col_names
# Only test successfully call function
matrix.to_sectors()
matrix.to_links()
def test_load_csv_matrix(csv_matrix_file: Path):
"""Test load csv matrix"""
# Load csv format matrix file
matrix = Matrix(csv_matrix_file, delimiter=",")
# Test row & column names
row_names = ["S1", "S2", "S3"]
col_names = ["E1", "E2", "E3", "E4", "E5", "E6"]
assert matrix.all_names == row_names + col_names
assert matrix.row_names == row_names
assert matrix.col_names == col_names
# Only test successfully call function
matrix.to_sectors()
matrix.to_links()
def test_matrix_sort():
"""Test `matrix.sort()`"""
matrix_df = pd.DataFrame(
[
[1, 2],
[3, 4],
],
index=["R1", "R2"],
columns=["C1", "C2"],
)
matrix = Matrix(matrix_df)
# Ascending Sort
expected_asc_matrix_df = pd.DataFrame(
[
[0, 1, 2, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 3, 4, 0],
],
index=["R1", "C1", "C2", "R2"],
columns=["R1", "C1", "C2", "R2"],
)
asc_matrix_df = matrix.sort("asc").dataframe
assert asc_matrix_df.equals(expected_asc_matrix_df)
# Descending Sort
expected_desc_matrix_df = pd.DataFrame(
[
[0, 4, 3, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 2, 1, 0],
],
index=["R2", "C2", "C1", "R1"],
columns=["R2", "C2", "C1", "R1"],
)
desc_matrix_df = matrix.sort("desc").dataframe
assert desc_matrix_df.equals(expected_desc_matrix_df)
# User-specified Order Sort
expected_sort_matrix_df = pd.DataFrame(
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 2, 0, 0],
[3, 4, 0, 0],
],
index=["C1", "C2", "R1", "R2"],
columns=["C1", "C2", "R1", "R2"],
)
sort_matrix_df = matrix.sort(["C1", "C2", "R1", "R2"]).dataframe
assert sort_matrix_df.equals(expected_sort_matrix_df)
def test_to_fromto_table(tsv_matrix_file: Path):
"""Test `matrix.to_fromto_table()`"""
matrix = Matrix(tsv_matrix_file)
expected_table_df = pd.DataFrame(
[
["S1", "E1", 4],
["S1", "E2", 14],
["S1", "E3", 13],
["S1", "E4", 17],
["S1", "E5", 5],
["S1", "E6", 2],
["S2", "E1", 7],
["S2", "E2", 1],
["S2", "E3", 6],
["S2", "E4", 8],
["S2", "E5", 12],
["S2", "E6", 15],
["S3", "E1", 9],
["S3", "E2", 10],
["S3", "E3", 3],
["S3", "E4", 16],
["S3", "E5", 11],
["S3", "E6", 18],
],
columns=["from", "to", "value"],
)
assert matrix.to_fromto_table().equals(expected_table_df)
|
ad55670eee8e750b4b54df339059838b3715e128
|
09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce
|
/pennylane/devices/default_qubit.py
|
f4286aa38979859fd6142be94aff21d0008a4b34
|
[
"Apache-2.0"
] |
permissive
|
PennyLaneAI/pennylane
|
458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7
|
0843183ff15a013c2622af5e61fea431d18076d3
|
refs/heads/master
| 2023-09-03T17:00:43.105784
| 2023-09-01T16:15:07
| 2023-09-01T16:15:07
| 129,936,360
| 1,431
| 410
|
Apache-2.0
| 2023-09-14T21:30:56
| 2018-04-17T16:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 43,614
|
py
|
default_qubit.py
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The default.qubit device is PennyLane's standard qubit-based device.
It implements the necessary :class:`~pennylane._device.Device` methods as well as some built-in
:mod:`qubit operations <pennylane.ops.qubit>`, and provides a very simple pure state
simulation of a qubit-based quantum circuit architecture.
"""
import functools
import itertools
from string import ascii_letters as ABC
from typing import List
import numpy as np
from scipy.sparse import csr_matrix
import pennylane as qml
from pennylane import BasisState, DeviceError, QubitDevice, StatePrep, Snapshot
from pennylane.devices.qubit import measure
from pennylane.operation import Operation
from pennylane.ops import Sum
from pennylane.ops.qubit.attributes import diagonal_in_z_basis
from pennylane.pulse import ParametrizedEvolution
from pennylane.measurements import ExpectationMP
from pennylane.typing import TensorLike
from pennylane.wires import WireError
from .._version import __version__
ABC_ARRAY = np.array(list(ABC))
# tolerance for numerical errors
tolerance = 1e-10
SQRT2INV = 1 / np.sqrt(2)
TPHASE = np.exp(1j * np.pi / 4)
def _get_slice(index, axis, num_axes):
"""Allows slicing along an arbitrary axis of an array or tensor.
Args:
index (int): the index to access
axis (int): the axis to slice into
num_axes (int): total number of axes
Returns:
tuple[slice or int]: a tuple that can be used to slice into an array or tensor
**Example:**
Accessing the 2 index along axis 1 of a 3-axis array:
>>> sl = _get_slice(2, 1, 3)
>>> sl
(slice(None, None, None), 2, slice(None, None, None))
>>> a = np.arange(27).reshape((3, 3, 3))
>>> a[sl]
array([[ 6, 7, 8],
[15, 16, 17],
[24, 25, 26]])
"""
idx = [slice(None)] * num_axes
idx[axis] = index
return tuple(idx)
# pylint: disable=unused-argument
class DefaultQubit(QubitDevice):
"""Default qubit device for PennyLane.
.. warning::
The API of ``DefaultQubit`` will be updated soon to follow a new device interface described
in :class:`pennylane.devices.experimental.Device`.
This change will not alter device behaviour for most workflows, but may have implications for
plugin developers and users who directly interact with device methods. Please consult
:class:`pennylane.devices.experimental.Device` and the implementation in
:class:`pennylane.devices.experimental.DefaultQubit2` for more information on what the new
interface will look like and be prepared to make updates in a coming release. If you have any
feedback on these changes, please create an
`issue <https://github.com/PennyLaneAI/pennylane/issues>`_ or post in our
`discussion forum <https://discuss.pennylane.ai/>`_.
Args:
wires (int, Iterable[Number, str]): Number of subsystems represented by the device,
or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)
or strings (``['ancilla', 'q1', 'q2']``). Default 1 if not specified.
shots (None, int): How many times the circuit should be evaluated (or sampled) to estimate
the expectation values. Defaults to ``None`` if not specified, which means that the device
returns analytical results.
"""
name = "Default qubit PennyLane plugin"
short_name = "default.qubit"
pennylane_requires = __version__
version = __version__
author = "Xanadu Inc."
operations = {
"GlobalPhase",
"Identity",
"Snapshot",
"BasisState",
"StatePrep",
"QubitStateVector",
"QubitUnitary",
"ControlledQubitUnitary",
"BlockEncode",
"MultiControlledX",
"IntegerComparator",
"DiagonalQubitUnitary",
"PauliX",
"PauliY",
"PauliZ",
"MultiRZ",
"Hadamard",
"S",
"Adjoint(S)",
"T",
"Adjoint(T)",
"SX",
"Adjoint(SX)",
"CNOT",
"SWAP",
"ISWAP",
"PSWAP",
"Adjoint(ISWAP)",
"SISWAP",
"Adjoint(SISWAP)",
"SQISW",
"CSWAP",
"Toffoli",
"CCZ",
"CY",
"CZ",
"CH",
"PhaseShift",
"PCPhase",
"ControlledPhaseShift",
"CPhaseShift00",
"CPhaseShift01",
"CPhaseShift10",
"CPhase",
"RX",
"RY",
"RZ",
"Rot",
"CRX",
"CRY",
"CRZ",
"CRot",
"IsingXX",
"IsingYY",
"IsingZZ",
"IsingXY",
"SingleExcitation",
"SingleExcitationPlus",
"SingleExcitationMinus",
"DoubleExcitation",
"DoubleExcitationPlus",
"DoubleExcitationMinus",
"QubitCarry",
"QubitSum",
"OrbitalRotation",
"QFT",
"ECR",
}
observables = {
"PauliX",
"PauliY",
"PauliZ",
"Hadamard",
"Hermitian",
"Identity",
"Projector",
"SparseHamiltonian",
"Hamiltonian",
"Sum",
"SProd",
"Prod",
"Exp",
"Evolution",
}
def __init__(
self, wires, *, r_dtype=np.float64, c_dtype=np.complex128, shots=None, analytic=None
):
super().__init__(wires, shots, r_dtype=r_dtype, c_dtype=c_dtype, analytic=analytic)
self._debugger = None
# Create the initial state. Internally, we store the
# state as an array of dimension [2]*wires.
self._state = self._create_basis_state(0)
self._pre_rotated_state = self._state
self._apply_ops = {
"PauliX": self._apply_x,
"PauliY": self._apply_y,
"PauliZ": self._apply_z,
"Hadamard": self._apply_hadamard,
"S": self._apply_s,
"T": self._apply_t,
"SX": self._apply_sx,
"CNOT": self._apply_cnot,
"SWAP": self._apply_swap,
"CZ": self._apply_cz,
"Toffoli": self._apply_toffoli,
}
@property
def stopping_condition(self):
def accepts_obj(obj):
if obj.name == "QFT" and len(obj.wires) >= 6:
return False
if obj.name == "GroverOperator" and len(obj.wires) >= 13:
return False
if getattr(obj, "has_matrix", False):
# pow operations dont work with backprop or adjoint without decomposition
# use class name string so we don't need to use isinstance check
return not (obj.__class__.__name__ == "Pow" and qml.operation.is_trainable(obj))
return obj.name in self.observables.union(self.operations)
return qml.BooleanFn(accepts_obj)
@functools.lru_cache()
def map_wires(self, wires):
# temporarily overwrite this method to bypass
# wire map that produces Wires objects
try:
mapped_wires = [self.wire_map[w] for w in wires]
except KeyError as e:
raise WireError(
f"Did not find some of the wires {wires.labels} on device with wires {self.wires.labels}."
) from e
return mapped_wires
def define_wire_map(self, wires):
# temporarily overwrite this method to bypass
# wire map that produces Wires objects
consecutive_wires = range(self.num_wires)
wire_map = zip(wires, consecutive_wires)
return dict(wire_map)
# pylint: disable=arguments-differ
def _get_batch_size(self, tensor, expected_shape, expected_size):
"""Determine whether a tensor has an additional batch dimension for broadcasting,
compared to an expected_shape."""
size = self._size(tensor)
if self._ndim(tensor) > len(expected_shape) or size > expected_size:
return size // expected_size
return None
# pylint: disable=arguments-differ
def apply(self, operations, rotations=None, **kwargs):
rotations = rotations or []
# apply the circuit operations
for i, operation in enumerate(operations):
if i > 0 and isinstance(operation, (StatePrep, BasisState)):
raise DeviceError(
f"Operation {operation.name} cannot be used after other Operations have already been applied "
f"on a {self.short_name} device."
)
if isinstance(operation, StatePrep):
self._apply_state_vector(operation.parameters[0], operation.wires)
elif isinstance(operation, BasisState):
self._apply_basis_state(operation.parameters[0], operation.wires)
elif isinstance(operation, Snapshot):
if self._debugger and self._debugger.active:
state_vector = np.array(self._flatten(self._state))
if operation.tag:
self._debugger.snapshots[operation.tag] = state_vector
else:
self._debugger.snapshots[len(self._debugger.snapshots)] = state_vector
elif isinstance(operation, ParametrizedEvolution):
self._state = self._apply_parametrized_evolution(self._state, operation)
else:
self._state = self._apply_operation(self._state, operation)
# store the pre-rotated state
self._pre_rotated_state = self._state
# apply the circuit rotations
for operation in rotations:
self._state = self._apply_operation(self._state, operation)
def _apply_parametrized_evolution(self, state: TensorLike, operation: ParametrizedEvolution):
"""Applies a parametrized evolution to the input state.
Args:
state (array[complex]): input state
operation (ParametrizedEvolution): operation to apply on the state
"""
raise NotImplementedError(
f"The device {self.short_name} cannot execute a ParametrizedEvolution operation. "
"Please use the jax interface."
)
def _apply_operation(self, state, operation):
"""Applies operations to the input state.
Args:
state (array[complex]): input state
operation (~.Operation): operation to apply on the device
Returns:
array[complex]: output state
"""
if operation.__class__.__name__ == "Identity":
return state
if operation.name == "GlobalPhase":
return self._apply_global_phase(state, operation)
wires = operation.wires
if str(operation.name) in self._apply_ops: # cast to string because of Tensor
shift = int(self._ndim(state) > self.num_wires)
axes = [ax + shift for ax in self.wires.indices(wires)]
return self._apply_ops[operation.name](state, axes)
matrix = self._asarray(self._get_unitary_matrix(operation), dtype=self.C_DTYPE)
if operation in diagonal_in_z_basis:
return self._apply_diagonal_unitary(state, matrix, wires)
if len(wires) <= 2:
# Einsum is faster for small gates
return self._apply_unitary_einsum(state, matrix, wires)
return self._apply_unitary(state, matrix, wires)
def _apply_global_phase(self, state, operation: qml.GlobalPhase): # pylint: disable=no-self-use
"""Applies a :class:`~.GlobalPhase` operation to the state."""
return qml.math.exp(-1j * operation.data[0]) * state
def _apply_x(self, state, axes, **kwargs):
"""Applies a PauliX gate by rolling 1 unit along the axis specified in ``axes``.
Rolling by 1 unit along the axis means that the :math:`|0 \rangle` state with index ``0`` is
shifted to the :math:`|1 \rangle` state with index ``1``. Likewise, since rolling beyond
the last index loops back to the first, :math:`|1 \rangle` is transformed to
:math:`|0\rangle`.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
return self._roll(state, 1, axes[0])
def _apply_y(self, state, axes, **kwargs):
"""Applies a PauliY gate by adding a negative sign to the 1 index along the axis specified
in ``axes``, rolling one unit along the same axis, and multiplying the result by 1j.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
return 1j * self._apply_x(self._apply_z(state, axes), axes)
def _apply_z(self, state, axes, **kwargs):
"""Applies a PauliZ gate by adding a negative sign to the 1 index along the axis specified
in ``axes``.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
return self._apply_phase(state, axes, -1)
def _apply_hadamard(self, state, axes, **kwargs):
"""Apply the Hadamard gate by combining the results of applying the PauliX and PauliZ gates.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
state_x = self._apply_x(state, axes)
state_z = self._apply_z(state, axes)
return self._const_mul(SQRT2INV, state_x + state_z)
def _apply_s(self, state, axes, inverse=False):
return self._apply_phase(state, axes, 1j, inverse)
def _apply_t(self, state, axes, inverse=False):
return self._apply_phase(state, axes, TPHASE, inverse)
def _apply_sx(self, state, axes, inverse=False):
"""Apply the Square Root X gate.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
if inverse:
return 0.5 * ((1 - 1j) * state + (1 + 1j) * self._apply_x(state, axes))
return 0.5 * ((1 + 1j) * state + (1 - 1j) * self._apply_x(state, axes))
def _apply_cnot(self, state, axes, **kwargs):
"""Applies a CNOT gate by slicing along the first axis specified in ``axes`` and then
applying an X transformation along the second axis.
By slicing along the first axis, we are able to select all of the amplitudes with a
corresponding :math:`|1\rangle` for the control qubit. This means we then just need to apply
a :class:`~.PauliX` (NOT) gate to the result.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
ndim = self._ndim(state)
sl_0 = _get_slice(0, axes[0], ndim)
sl_1 = _get_slice(1, axes[0], ndim)
# We will be slicing into the state according to state[sl_1], giving us all of the
# amplitudes with a |1> for the control qubit. The resulting array has lost an axis
# relative to state and we need to be careful about the axis we apply the PauliX rotation
# to. If axes[1] is larger than axes[0], then we need to shift the target axis down by
# one, otherwise we can leave as-is. For example: a state has [0, 1, 2, 3], control=1,
# target=3. Then, state[sl_1] has 3 axes and target=3 now corresponds to the second axis.
if axes[1] > axes[0]:
target_axes = [axes[1] - 1]
else:
target_axes = [axes[1]]
state_x = self._apply_x(state[sl_1], axes=target_axes)
return self._stack([state[sl_0], state_x], axis=axes[0])
def _apply_toffoli(self, state, axes, **kwargs):
"""Applies a Toffoli gate by slicing along the axis of the greater control qubit, slicing
each of the resulting sub-arrays along the axis of the smaller control qubit, and then applying
an X transformation along the axis of the target qubit of the fourth sub-sub-array.
By performing two consecutive slices in this way, we are able to select all of the amplitudes with
a corresponding :math:`|11\rangle` for the two control qubits. This means we then just need to apply
a :class:`~.PauliX` (NOT) gate to the result.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
cntrl_max = np.argmax(axes[:2])
cntrl_min = cntrl_max ^ 1
ndim = self._ndim(state)
sl_a0 = _get_slice(0, axes[cntrl_max], ndim)
sl_a1 = _get_slice(1, axes[cntrl_max], ndim)
sl_b0 = _get_slice(0, axes[cntrl_min], ndim - 1)
sl_b1 = _get_slice(1, axes[cntrl_min], ndim - 1)
# If both controls are smaller than the target, shift the target axis down by two. If one
# control is greater and one control is smaller than the target, shift the target axis
# down by one. If both controls are greater than the target, leave the target axis as-is.
if axes[cntrl_min] > axes[2]:
target_axes = [axes[2]]
elif axes[cntrl_max] > axes[2]:
target_axes = [axes[2] - 1]
else:
target_axes = [axes[2] - 2]
# state[sl_a1][sl_b1] gives us all of the amplitudes with a |11> for the two control qubits.
state_x = self._apply_x(state[sl_a1][sl_b1], axes=target_axes)
state_stacked_a1 = self._stack([state[sl_a1][sl_b0], state_x], axis=axes[cntrl_min])
return self._stack([state[sl_a0], state_stacked_a1], axis=axes[cntrl_max])
def _apply_swap(self, state, axes, **kwargs):
"""Applies a SWAP gate by performing a partial transposition along the specified axes.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
all_axes = list(range(len(state.shape)))
all_axes[axes[0]] = axes[1]
all_axes[axes[1]] = axes[0]
return self._transpose(state, all_axes)
def _apply_cz(self, state, axes, **kwargs):
"""Applies a CZ gate by slicing along the first axis specified in ``axes`` and then
applying a Z transformation along the second axis.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
Returns:
array[complex]: output state
"""
ndim = self._ndim(state)
sl_0 = _get_slice(0, axes[0], ndim)
sl_1 = _get_slice(1, axes[0], ndim)
if axes[1] > axes[0]:
target_axes = [axes[1] - 1]
else:
target_axes = [axes[1]]
state_z = self._apply_z(state[sl_1], axes=target_axes)
return self._stack([state[sl_0], state_z], axis=axes[0])
def _apply_phase(self, state, axes, parameters, inverse=False):
"""Applies a phase onto the 1 index along the axis specified in ``axes``.
Args:
state (array[complex]): input state
axes (List[int]): target axes to apply transformation
parameters (float): phase to apply
inverse (bool): whether to apply the inverse phase
Returns:
array[complex]: output state
"""
ndim = self._ndim(state)
sl_0 = _get_slice(0, axes[0], ndim)
sl_1 = _get_slice(1, axes[0], ndim)
phase = self._conj(parameters) if inverse else parameters
return self._stack([state[sl_0], self._const_mul(phase, state[sl_1])], axis=axes[0])
def expval(self, observable, shot_range=None, bin_size=None):
"""Returns the expectation value of a Hamiltonian observable. When the observable is a
``Hamiltonian`` or ``SparseHamiltonian`` object, the expectation value is computed directly
from the sparse matrix representation, which leads to faster execution.
Args:
observable (~.Observable): a PennyLane observable
shot_range (tuple[int]): 2-tuple of integers specifying the range of samples
to use. If not specified, all samples are used.
bin_size (int): Divides the shot range into bins of size ``bin_size``, and
returns the measurement statistic separately over each bin. If not
provided, the entire shot range is treated as a single bin.
Returns:
float: returns the expectation value of the observable
.. warning::
This function does not support broadcasting if ``observable`` is a
:class:``~.Hamiltonian`` and the device interface or the interface of the
Hamiltonian is not NumPy or Autograd
"""
is_state_batched = self._ndim(self.state) == 2
# intercept Sums
if isinstance(observable, Sum) and not self.shots:
return measure(
ExpectationMP(observable.map_wires(self.wire_map)),
self._pre_rotated_state,
is_state_batched,
)
# intercept other Hamiltonians
# TODO: Ideally, this logic should not live in the Device, but be moved
# to a component that can be re-used by devices as needed.
if observable.name not in ("Hamiltonian", "SparseHamiltonian"):
return super().expval(observable, shot_range=shot_range, bin_size=bin_size)
assert self.shots is None, f"{observable.name} must be used with shots=None"
self.map_wires(observable.wires)
backprop_mode = (
not isinstance(self.state, np.ndarray)
or any(not isinstance(d, (float, np.ndarray)) for d in observable.data)
) and observable.name == "Hamiltonian"
if backprop_mode:
# TODO[dwierichs]: This branch is not adapted to broadcasting yet
if is_state_batched:
raise NotImplementedError(
"Expectation values of Hamiltonians for interface!=None are "
"not supported together with parameter broadcasting yet"
)
# We must compute the expectation value assuming that the Hamiltonian
# coefficients *and* the quantum states are tensor objects.
# Compute <psi| H |psi> via sum_i coeff_i * <psi| PauliWord |psi> using a sparse
# representation of the Pauliword
res = qml.math.cast(qml.math.convert_like(0.0, observable.data), dtype=complex)
interface = qml.math.get_interface(self.state)
# Note: it is important that we use the Hamiltonian's data and not the coeffs
# attribute. This is because the .data attribute may be 'unwrapped' as required by
# the interfaces, whereas the .coeff attribute will always be the same input dtype
# that the user provided.
for op, coeff in zip(observable.ops, observable.data):
# extract a scipy.sparse.coo_matrix representation of this Pauli word
coo = qml.operation.Tensor(op).sparse_matrix(wire_order=self.wires, format="coo")
Hmat = qml.math.cast(qml.math.convert_like(coo.data, self.state), self.C_DTYPE)
product = (
self._gather(self._conj(self.state), coo.row)
* Hmat
* self._gather(self.state, coo.col)
)
c = qml.math.convert_like(coeff, product)
if interface == "tensorflow":
c = qml.math.cast(c, "complex128")
res = qml.math.convert_like(res, product) + qml.math.sum(c * product)
else:
# Coefficients and the state are not trainable, we can be more
# efficient in how we compute the Hamiltonian sparse matrix.
Hmat = observable.sparse_matrix(wire_order=self.wires)
state = qml.math.toarray(self.state)
if is_state_batched:
res = qml.math.array(
[
csr_matrix.dot(
csr_matrix(self._conj(_state)),
csr_matrix.dot(Hmat, csr_matrix(_state[..., None])),
).toarray()[0]
for _state in state
]
)
else:
res = csr_matrix.dot(
csr_matrix(self._conj(state)),
csr_matrix.dot(Hmat, csr_matrix(state[..., None])),
).toarray()[0]
if observable.name == "Hamiltonian":
res = qml.math.squeeze(res)
return self._real(res)
def _get_unitary_matrix(self, unitary): # pylint: disable=no-self-use
"""Return the matrix representing a unitary operation.
Args:
unitary (~.Operation): a PennyLane unitary operation
Returns:
array[complex]: Returns a 2D matrix representation of
the unitary in the computational basis, or, in the case of a diagonal unitary,
a 1D array representing the matrix diagonal.
"""
if unitary in diagonal_in_z_basis:
return unitary.eigvals()
return unitary.matrix()
@classmethod
def capabilities(cls):
capabilities = super().capabilities().copy()
capabilities.update(
model="qubit",
supports_inverse_operations=True,
supports_analytic_computation=True,
supports_broadcasting=True,
returns_state=True,
passthru_devices={
"tf": "default.qubit.tf",
"torch": "default.qubit.torch",
"autograd": "default.qubit.autograd",
"jax": "default.qubit.jax",
},
)
return capabilities
def _create_basis_state(self, index):
"""Return a computational basis state over all wires.
Args:
index (int): integer representing the computational basis state
Returns:
array[complex]: complex array of shape ``[2]*self.num_wires``
representing the statevector of the basis state
Note: This function does not support broadcasted inputs yet.
"""
state = np.zeros(2**self.num_wires, dtype=np.complex128)
state[index] = 1
state = self._asarray(state, dtype=self.C_DTYPE)
return self._reshape(state, [2] * self.num_wires)
@property
def state(self):
dim = 2**self.num_wires
batch_size = self._get_batch_size(self._pre_rotated_state, (2,) * self.num_wires, dim)
# Do not flatten the state completely but leave the broadcasting dimension if there is one
shape = (batch_size, dim) if batch_size is not None else (dim,)
return self._reshape(self._pre_rotated_state, shape)
def _apply_state_vector(self, state, device_wires):
"""Initialize the internal state vector in a specified state.
Args:
state (array[complex]): normalized input state of length ``2**len(wires)``
or broadcasted state of shape ``(batch_size, 2**len(wires))``
device_wires (Wires): wires that get initialized in the state
"""
# translate to wire labels used by device
device_wires = self.map_wires(device_wires)
dim = 2 ** len(device_wires)
state = self._asarray(state, dtype=self.C_DTYPE)
batch_size = self._get_batch_size(state, (dim,), dim)
output_shape = [2] * self.num_wires
if batch_size is not None:
output_shape.insert(0, batch_size)
if len(device_wires) == self.num_wires and sorted(device_wires) == device_wires:
# Initialize the entire device state with the input state
self._state = self._reshape(state, output_shape)
return
# generate basis states on subset of qubits via the cartesian product
basis_states = np.array(list(itertools.product([0, 1], repeat=len(device_wires))))
# get basis states to alter on full set of qubits
unravelled_indices = np.zeros((2 ** len(device_wires), self.num_wires), dtype=int)
unravelled_indices[:, device_wires] = basis_states
# get indices for which the state is changed to input state vector elements
ravelled_indices = np.ravel_multi_index(unravelled_indices.T, [2] * self.num_wires)
if batch_size is not None:
state = self._scatter(
(slice(None), ravelled_indices), state, [batch_size, 2**self.num_wires]
)
else:
state = self._scatter(ravelled_indices, state, [2**self.num_wires])
state = self._reshape(state, output_shape)
self._state = self._asarray(state, dtype=self.C_DTYPE)
def _apply_basis_state(self, state, wires):
"""Initialize the state vector in a specified computational basis state.
Args:
state (array[int]): computational basis state of shape ``(wires,)``
consisting of 0s and 1s.
wires (Wires): wires that the provided computational state should be initialized on
Note: This function does not support broadcasted inputs yet.
"""
# translate to wire labels used by device
device_wires = self.map_wires(wires)
# length of basis state parameter
n_basis_state = len(state)
if not set(state.tolist()).issubset({0, 1}):
raise ValueError("BasisState parameter must consist of 0 or 1 integers.")
if n_basis_state != len(device_wires):
raise ValueError("BasisState parameter and wires must be of equal length.")
# get computational basis state number
basis_states = 2 ** (self.num_wires - 1 - np.array(device_wires))
basis_states = qml.math.convert_like(basis_states, state)
num = int(qml.math.dot(state, basis_states))
self._state = self._create_basis_state(num)
def _apply_unitary(self, state, mat, wires):
r"""Apply multiplication of a matrix to subsystems of the quantum state.
Args:
state (array[complex]): input state
mat (array): matrix to multiply
wires (Wires): target wires
Returns:
array[complex]: output state
Note: This function does not support simultaneously broadcasted states and matrices yet.
"""
# translate to wire labels used by device
device_wires = self.map_wires(wires)
dim = 2 ** len(device_wires)
mat_batch_size = self._get_batch_size(mat, (dim, dim), dim**2)
state_batch_size = self._get_batch_size(state, (2,) * self.num_wires, 2**self.num_wires)
shape = [2] * (len(device_wires) * 2)
state_axes = device_wires
# If the matrix is broadcasted, it is reshaped to have leading axis of size mat_batch_size
if mat_batch_size:
shape.insert(0, mat_batch_size)
if state_batch_size:
raise NotImplementedError(
"Applying a broadcasted unitary to an already broadcasted state via "
"_apply_unitary is not supported. Broadcasting sizes are "
f"({mat_batch_size}, {state_batch_size})."
)
# If the state is broadcasted, the affected state axes need to be shifted by 1.
if state_batch_size:
state_axes = [ax + 1 for ax in state_axes]
mat = self._cast(self._reshape(mat, shape), dtype=self.C_DTYPE)
axes = (np.arange(-len(device_wires), 0), state_axes)
tdot = self._tensordot(mat, state, axes=axes)
# tensordot causes the axes given in `wires` to end up in the first positions
# of the resulting tensor. This corresponds to a (partial) transpose of
# the correct output state
# We'll need to invert this permutation to put the indices in the correct place
unused_idxs = [idx for idx in range(self.num_wires) if idx not in device_wires]
perm = list(device_wires) + unused_idxs
# If the matrix is broadcasted, all but the first dimension are shifted by 1
if mat_batch_size:
perm = [idx + 1 for idx in perm]
perm.insert(0, 0)
if state_batch_size:
# As the state broadcasting dimension always is the first in the state, it always
# ends up in position `len(device_wires)` after the tensordot. The -1 causes it
# being permuted to the leading dimension after transposition
perm.insert(len(device_wires), -1)
inv_perm = np.argsort(perm) # argsort gives inverse permutation
return self._transpose(tdot, inv_perm)
def _apply_unitary_einsum(self, state, mat, wires):
r"""Apply multiplication of a matrix to subsystems of the quantum state.
This function uses einsum instead of tensordot. This approach is only
faster for single- and two-qubit gates.
Args:
state (array[complex]): input state
mat (array): matrix to multiply
wires (Wires): target wires
Returns:
array[complex]: output state
"""
# translate to wire labels used by device
device_wires = self.map_wires(wires)
dim = 2 ** len(device_wires)
batch_size = self._get_batch_size(mat, (dim, dim), dim**2)
# If the matrix is broadcasted, it is reshaped to have leading axis of size mat_batch_size
shape = [2] * (len(device_wires) * 2)
if batch_size is not None:
shape.insert(0, batch_size)
mat = self._cast(self._reshape(mat, shape), dtype=self.C_DTYPE)
# Tensor indices of the quantum state
state_indices = ABC[: self.num_wires]
# Indices of the quantum state affected by this operation
affected_indices = "".join(ABC_ARRAY[list(device_wires)].tolist())
# All affected indices will be summed over, so we need the same number of new indices
new_indices = ABC[self.num_wires : self.num_wires + len(device_wires)]
# The new indices of the state are given by the old ones with the affected indices
# replaced by the new_indices
new_state_indices = functools.reduce(
lambda old_string, idx_pair: old_string.replace(idx_pair[0], idx_pair[1]),
zip(affected_indices, new_indices),
state_indices,
)
# We now put together the indices in the notation numpy's einsum requires
# This notation allows for the state, the matrix, or both to be broadcasted
einsum_indices = (
f"...{new_indices}{affected_indices},...{state_indices}->...{new_state_indices}"
)
return self._einsum(einsum_indices, mat, state)
def _apply_diagonal_unitary(self, state, phases, wires):
r"""Apply multiplication of a phase vector to subsystems of the quantum state.
This represents the multiplication with diagonal gates in a more efficient manner.
Args:
state (array[complex]): input state
phases (array): vector to multiply
wires (Wires): target wires
Returns:
array[complex]: output state
"""
# translate to wire labels used by device
device_wires = self.map_wires(wires)
dim = 2 ** len(device_wires)
batch_size = self._get_batch_size(phases, (dim,), dim)
# reshape vectors
shape = [2] * len(device_wires)
if batch_size is not None:
shape.insert(0, batch_size)
phases = self._cast(self._reshape(phases, shape), dtype=self.C_DTYPE)
state_indices = ABC[: self.num_wires]
affected_indices = "".join(ABC_ARRAY[list(device_wires)].tolist())
einsum_indices = f"...{affected_indices},...{state_indices}->...{state_indices}"
return self._einsum(einsum_indices, phases, state)
def reset(self):
"""Reset the device"""
super().reset()
# init the state vector to |00..0>
self._state = self._create_basis_state(0)
self._pre_rotated_state = self._state
def analytic_probability(self, wires=None):
if self._state is None:
return None
dim = 2**self.num_wires
batch_size = self._get_batch_size(self._state, [2] * self.num_wires, dim)
flat_state = self._reshape(
self._state, (batch_size, dim) if batch_size is not None else (dim,)
)
real_state = self._real(flat_state)
imag_state = self._imag(flat_state)
return self.marginal_prob(real_state**2 + imag_state**2, wires)
def classical_shadow(self, obs, circuit):
"""
Returns the measured bits and recipes in the classical shadow protocol.
The protocol is described in detail in the `classical shadows paper <https://arxiv.org/abs/2002.08953>`_.
This measurement process returns the randomized Pauli measurements (the ``recipes``)
that are performed for each qubit and snapshot as an integer:
- 0 for Pauli X,
- 1 for Pauli Y, and
- 2 for Pauli Z.
It also returns the measurement results (the ``bits``); 0 if the 1 eigenvalue
is sampled, and 1 if the -1 eigenvalue is sampled.
The device shots are used to specify the number of snapshots. If ``T`` is the number
of shots and ``n`` is the number of qubits, then both the measured bits and the
Pauli measurements have shape ``(T, n)``.
This implementation leverages vectorization and offers a significant speed-up over
the generic implementation.
.. Note::
This method internally calls ``np.einsum`` which supports at most 52 indices,
thus the classical shadow measurement for this device supports at most 52
qubits.
.. seealso:: :func:`~pennylane.classical_shadow`
Args:
obs (~.pennylane.measurements.ClassicalShadowMP): The classical shadow measurement process
circuit (~.tape.QuantumTape): The quantum tape that is being executed
Returns:
tensor_like[int]: A tensor with shape ``(2, T, n)``, where the first row represents
the measured bits and the second represents the recipes used.
"""
wires = obs.wires
seed = obs.seed
n_qubits = len(wires)
n_snapshots = self.shots
device_qubits = len(self.wires)
mapped_wires = np.array(self.map_wires(wires))
# seed the random measurement generation so that recipes
# are the same for different executions with the same seed
rng = np.random.RandomState(seed)
recipes = rng.randint(0, 3, size=(n_snapshots, n_qubits))
obs_list = self._stack(
[
qml.PauliX.compute_matrix(),
qml.PauliY.compute_matrix(),
qml.PauliZ.compute_matrix(),
]
)
uni_list = self._stack(
[
qml.Hadamard.compute_matrix(),
qml.Hadamard.compute_matrix() @ qml.RZ.compute_matrix(-np.pi / 2),
qml.Identity.compute_matrix(),
]
)
obs = obs_list[recipes]
uni = uni_list[recipes]
# There's a significant speedup if we use the following iterative
# process to perform the randomized Pauli measurements:
# 1. Randomly generate Pauli observables for all snapshots for
# a single qubit (e.g. the first qubit).
# 2. Compute the expectation of each Pauli observable on the first
# qubit by tracing out all other qubits.
# 3. Sample the first qubit based on each Pauli expectation.
# 4. For all snapshots, determine the collapsed state of the remaining
# qubits based on the sample result.
        # 5. Repeat iteratively until no qubits are remaining.
#
# Observe that after the first iteration, the second qubit will become the
# "first" qubit in the process. The advantage to this approach as opposed to
        # simultaneously computing the Pauli expectations for each qubit is that
# the partial traces are computed over iteratively smaller subsystems, leading
# to a significant speed-up.
# transpose the state so that the measured wires appear first
unmeasured_wires = [i for i in range(len(self.wires)) if i not in mapped_wires]
transposed_state = np.transpose(self._state, axes=mapped_wires.tolist() + unmeasured_wires)
outcomes = np.zeros((n_snapshots, n_qubits))
stacked_state = self._stack([transposed_state for _ in range(n_snapshots)])
for i in range(n_qubits):
# trace out every qubit except the first
first_qubit_state = self._einsum(
f"{ABC[device_qubits - i + 1]}{ABC[:device_qubits - i]},{ABC[device_qubits - i + 1]}{ABC[device_qubits - i]}{ABC[1:device_qubits - i]}"
f"->{ABC[device_qubits - i + 1]}a{ABC[device_qubits - i]}",
stacked_state,
self._conj(stacked_state),
)
# sample the observables on the first qubit
probs = (self._einsum("abc,acb->a", first_qubit_state, obs[:, i]) + 1) / 2
samples = np.random.uniform(0, 1, size=probs.shape) > probs
outcomes[:, i] = samples
# collapse the state of the remaining qubits; the next qubit in line
# becomes the first qubit for the next iteration
rotated_state = self._einsum("ab...,acb->ac...", stacked_state, uni[:, i])
stacked_state = rotated_state[np.arange(n_snapshots), self._cast(samples, np.int8)]
# re-normalize the collapsed state
norms = np.sqrt(
np.sum(
np.abs(stacked_state) ** 2, tuple(range(1, device_qubits - i)), keepdims=True
)
)
stacked_state /= norms
return self._cast(self._stack([outcomes, recipes]), dtype=np.int8)
def _get_diagonalizing_gates(self, circuit: qml.tape.QuantumTape) -> List[Operation]:
meas_filtered = [
m
for m in circuit.measurements
if m.obs is None or not isinstance(m.obs, qml.Hamiltonian)
]
return super()._get_diagonalizing_gates(qml.tape.QuantumScript(measurements=meas_filtered))
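

# --- Illustrative sketch (not part of the device implementation above): how the
# (2, T, n) output of the classical shadow measurement can be turned into
# per-qubit snapshot states. Assuming the recipe convention used above
# (0 -> X, 1 -> Y, 2 -> Z), the inverted measurement channel gives
# rho_hat = 3 * U^dag |b><b| U - I for each measured bit b and basis rotation U.
# The bits/recipes below are made-up data, not device output.
if __name__ == "__main__":
    _rotations = [
        np.array([[1, 1], [1, -1]]) / np.sqrt(2),    # rotate to X basis (H)
        np.array([[1, -1j], [1, 1j]]) / np.sqrt(2),  # rotate to Y basis (H S^dag)
        np.eye(2),                                   # Z basis (identity)
    ]
    _bits = np.array([[0, 1], [1, 0]])               # shape (T, n): 2 snapshots, 2 qubits
    _recipes = np.array([[0, 2], [1, 1]])
    for _t in range(_bits.shape[0]):
        for _q in range(_bits.shape[1]):
            _U = _rotations[_recipes[_t, _q]]
            _b = np.eye(2)[_bits[_t, _q]].reshape(2, 1)
            _rho_hat = 3 * _U.conj().T @ (_b @ _b.conj().T) @ _U - np.eye(2)
            print(f"snapshot {_t}, qubit {_q}:\n{np.round(_rho_hat, 3)}")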
|
ec53d5473b76e2bd4d212e3a4f55465db2fcb6bf
|
b6ef14f2450db87b8b902ee2b606a3d3b223f0ca
|
/dns/dnssecalgs/rsa.py
|
e95dcf1ddc45ad7c2731b258f5edd3abd34e5248
|
[
"ISC",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
rthalley/dnspython
|
88f6b18738b2548e83e6f82e5a296dfa555b77d3
|
c465d3c0e15a52e4109c9f80131e657d8bdb0471
|
refs/heads/master
| 2023-08-30T17:03:04.472884
| 2023-08-29T21:11:10
| 2023-08-29T21:11:17
| 2,261,155
| 2,049
| 554
|
NOASSERTION
| 2023-09-14T16:05:48
| 2011-08-24T11:36:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,555
|
py
|
rsa.py
|
import math
import struct
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from dns.dnssecalgs.cryptography import CryptographyPrivateKey, CryptographyPublicKey
from dns.dnssectypes import Algorithm
from dns.rdtypes.ANY.DNSKEY import DNSKEY
class PublicRSA(CryptographyPublicKey):
key: rsa.RSAPublicKey
key_cls = rsa.RSAPublicKey
algorithm: Algorithm
chosen_hash: hashes.HashAlgorithm
def verify(self, signature: bytes, data: bytes) -> None:
self.key.verify(signature, data, padding.PKCS1v15(), self.chosen_hash)
def encode_key_bytes(self) -> bytes:
"""Encode a public key per RFC 3110, section 2."""
pn = self.key.public_numbers()
_exp_len = math.ceil(int.bit_length(pn.e) / 8)
exp = int.to_bytes(pn.e, length=_exp_len, byteorder="big")
if _exp_len > 255:
exp_header = b"\0" + struct.pack("!H", _exp_len)
else:
exp_header = struct.pack("!B", _exp_len)
if pn.n.bit_length() < 512 or pn.n.bit_length() > 4096:
raise ValueError("unsupported RSA key length")
return exp_header + exp + pn.n.to_bytes((pn.n.bit_length() + 7) // 8, "big")
@classmethod
def from_dnskey(cls, key: DNSKEY) -> "PublicRSA":
cls._ensure_algorithm_key_combination(key)
keyptr = key.key
(bytes_,) = struct.unpack("!B", keyptr[0:1])
keyptr = keyptr[1:]
if bytes_ == 0:
(bytes_,) = struct.unpack("!H", keyptr[0:2])
keyptr = keyptr[2:]
rsa_e = keyptr[0:bytes_]
rsa_n = keyptr[bytes_:]
return cls(
key=rsa.RSAPublicNumbers(
int.from_bytes(rsa_e, "big"), int.from_bytes(rsa_n, "big")
).public_key(default_backend())
)
class PrivateRSA(CryptographyPrivateKey):
key: rsa.RSAPrivateKey
key_cls = rsa.RSAPrivateKey
public_cls = PublicRSA
default_public_exponent = 65537
def sign(self, data: bytes, verify: bool = False) -> bytes:
"""Sign using a private key per RFC 3110, section 3."""
signature = self.key.sign(data, padding.PKCS1v15(), self.public_cls.chosen_hash)
if verify:
self.public_key().verify(signature, data)
return signature
@classmethod
def generate(cls, key_size: int) -> "PrivateRSA":
return cls(
key=rsa.generate_private_key(
public_exponent=cls.default_public_exponent,
key_size=key_size,
backend=default_backend(),
)
)
class PublicRSAMD5(PublicRSA):
algorithm = Algorithm.RSAMD5
chosen_hash = hashes.MD5()
class PrivateRSAMD5(PrivateRSA):
public_cls = PublicRSAMD5
class PublicRSASHA1(PublicRSA):
algorithm = Algorithm.RSASHA1
chosen_hash = hashes.SHA1()
class PrivateRSASHA1(PrivateRSA):
public_cls = PublicRSASHA1
class PublicRSASHA1NSEC3SHA1(PublicRSA):
algorithm = Algorithm.RSASHA1NSEC3SHA1
chosen_hash = hashes.SHA1()
class PrivateRSASHA1NSEC3SHA1(PrivateRSA):
public_cls = PublicRSASHA1NSEC3SHA1
class PublicRSASHA256(PublicRSA):
algorithm = Algorithm.RSASHA256
chosen_hash = hashes.SHA256()
class PrivateRSASHA256(PrivateRSA):
public_cls = PublicRSASHA256
class PublicRSASHA512(PublicRSA):
algorithm = Algorithm.RSASHA512
chosen_hash = hashes.SHA512()
class PrivateRSASHA512(PrivateRSA):
public_cls = PublicRSASHA512
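

# --- Illustrative sketch (not part of dnspython's public API): the RFC 3110
# wire framing implemented by encode_key_bytes() / from_dnskey() above. The
# exponent length is a single byte, or a zero byte followed by a two-byte
# length when the exponent needs more than 255 bytes. The toy values of e and
# n below are made up and far too small for real keys.
if __name__ == "__main__":
    e, n = 65537, 0xC0FFEE
    exp = e.to_bytes(math.ceil(e.bit_length() / 8), "big")
    if len(exp) > 255:
        header = b"\0" + struct.pack("!H", len(exp))
    else:
        header = struct.pack("!B", len(exp))
    wire = header + exp + n.to_bytes((n.bit_length() + 7) // 8, "big")
    # Parse it back the same way from_dnskey() does.
    (length,) = struct.unpack("!B", wire[0:1])
    rest = wire[1:]
    if length == 0:
        (length,) = struct.unpack("!H", rest[0:2])
        rest = rest[2:]
    assert int.from_bytes(rest[0:length], "big") == e
    assert int.from_bytes(rest[length:], "big") == n
    print("round-trip ok:", wire.hex())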
|
5fe9ccab532cba8c8a0a67a61f861d88f03d1fb1
|
a8f1887959f08b609cecd1049ec84094dc2505f5
|
/src/sagemaker_xgboost_container/algorithm_mode/integration.py
|
563fac1ef1f26ffb85f0bf558b564e3adad00ce9
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-xgboost-container
|
b4753bbc1eb1a5f71672db6928289c9e44ed0d23
|
d2b7e83038956e158d2b07c809026a8ffb2e832c
|
refs/heads/master
| 2023-09-04T19:56:07.539331
| 2023-08-29T19:48:37
| 2023-08-29T19:48:37
| 187,721,865
| 107
| 76
|
Apache-2.0
| 2023-08-29T19:49:15
| 2019-05-20T22:30:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
integration.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import logging.config
FORMATTERS = {
"verbose": {
"format": "[%(asctime)s:%(levelname)s] %(message)s",
"datefmt": "%Y-%m-%d:%H:%M:%S",
},
"simple": {"format": "[%(levelname)s:%(name)s] %(message)s"},
}
CONSOLE_LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": FORMATTERS,
"handlers": {
"console": {"level": "INFO", "formatter": "verbose", "class": "logging.StreamHandler", "stream": None},
},
"root": {
"handlers": ["console"],
"level": "INFO",
},
}
LOGGING_CONFIGS = {
"console_only": CONSOLE_LOGGING,
}
def setup_main_logger(name):
"""
Return a logger that configures logging for the main application.
:param name: Name of the returned logger.
"""
log_config = LOGGING_CONFIGS["console_only"]
logging.config.dictConfig(log_config)
return logging.getLogger(name)
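

# A minimal usage sketch (assumed, not one of the container's entry points):
# configure the console-only logging once and log through the returned logger.
if __name__ == "__main__":
    logger = setup_main_logger(__name__)
    logger.info("console logging configured")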
|
01a449f006f08fdbae1f17b8062e83da1e9eb32b
|
07434513334237d453faae8972b136f28b8d1e2c
|
/remme/shared/forms/__init__.py
|
57949205fcc7079ee476bc93e965dca821c23d78
|
[
"Apache-2.0"
] |
permissive
|
Remmeauth/remme-core
|
5a595391659e60ce82fbe73883be13df8a52dad9
|
3a8ac8d8f6ba1a1126c028c81d350c9475fe9834
|
refs/heads/master
| 2021-03-27T14:17:19.966578
| 2019-05-21T17:00:15
| 2019-05-21T17:00:15
| 121,400,220
| 132
| 30
|
Apache-2.0
| 2021-12-29T20:13:14
| 2018-02-13T15:36:38
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
__init__.py
|
from .base import ProtoForm
from .pub_key import (
    NewPublicKeyPayloadForm,
    NewPubKeyStoreAndPayPayloadForm,
    RevokePubKeyPayloadForm,
)
from .account import (
    TransferPayloadForm,
    GenesisPayloadForm,
    get_address_form,
)
from .atomic_swap import (
AtomicSwapInitPayloadForm,
AtomicSwapApprovePayloadForm,
AtomicSwapExpirePayloadForm,
AtomicSwapSetSecretLockPayloadForm,
AtomicSwapClosePayloadForm,
AtomicSwapForm,
)
from .identifier import (
IdentifierForm,
IdentifiersForm,
)
from .personal import (
NodePKForm,
)
|
c1988b00af449128640cd9820902026f2f78717e
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/django-stubs/django-stubs/utils/tree.pyi
|
75a7c7b24eadaaeaaf8cadd4d3f9c9c3720304f6
|
[
"MIT"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 799
|
pyi
|
tree.pyi
|
from typing import Any, Dict, Iterable, Optional, Tuple, Union, Sequence, List
from django.db.models.sql.where import NothingNode
_NodeChildren = Iterable[Union["Node", NothingNode, Sequence[Any]]]
class Node:
children: List[Any]
default: Any = ...
connector: str = ...
negated: bool = ...
def __init__(
self, children: Optional[_NodeChildren] = ..., connector: Optional[str] = ..., negated: bool = ...
) -> None: ...
def __deepcopy__(self, memodict: Dict[Any, Any]) -> Node: ...
def __len__(self) -> int: ...
def __bool__(self) -> bool: ...
def __contains__(self, other: Tuple[str, int]) -> bool: ...
def __hash__(self) -> int: ...
def add(self, data: Any, conn_type: str, squash: bool = ...) -> Any: ...
def negate(self) -> None: ...
|
5b0001ce94664d22bad6995c8753ea730147a0a2
|
8f3b307a592ee4a811c85f9491d7cba912a7d7f8
|
/src/ontology/util_taxon_conversion.py
|
42485eadcd8e99c81a5730da811a72b9b2bfaac8
|
[
"CC-BY-4.0"
] |
permissive
|
FoodOntology/foodon
|
3e71c1098dc6436a4ab1b735b13e5365a9a49a6c
|
6bea195380238d5c16f6e60ae7c24988b104f5e8
|
refs/heads/master
| 2023-08-07T16:20:16.828032
| 2023-08-04T17:53:50
| 2023-08-04T17:53:50
| 54,141,883
| 163
| 35
|
CC-BY-4.0
| 2023-06-21T18:39:38
| 2016-03-17T18:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 8,311
|
py
|
util_taxon_conversion.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# A delicate script to deprecate FoodON plant and animal organisms when they
# have a subclass axiom " 'in taxon' some [NCBITaxon organism]". The subclass
# axiom is removed, and the class is converted to be about the NCBITaxon
# organism so that all Foodon annotations and other clauses about it are kept.
# References to the existing term are switched to the new NCBITaxon term.
# A deprecation entry is created for the existing FoodOn class.
#
# The initial run of this in Sept 2020 converted about 1782 classes.
# It's ok to re-run this - it will do the conversion on any new plant or animal
# that has an 'in taxon' link to an NCBITaxon.
#
# Classes that have 'in taxon' (x or y or z...) are left alone.
#
# NOTE: Trick with result = elementTree.find(...)
# - it can only be tested with "if result != None:"
#
# NOTE: Currently this cannot be run twice on the same i/o file because, on
# save in the first pass, the xml: entity prefix is not included in the entity
# declaration section but the file content still contains this prefix, so
# searches for xml:... error out.
#
# NOTE: In case one needs to retrieve a good deprecation_import.owl file:
# This one not infected with output of 'in taxon' script:
# git show f0aed4b:src/ontology/imports/deprecated_import.owl > imports/deprecation_import.owl
#
#
# Order of operations:
# python util_taxon_conversion.py
# python util_obsoletion_update.py imports/deprecation_import.owl foodon-edit.owl
# python util_obsoletion_update.py imports/deprecation_import.owl imports/foodon_product_import.owl
# see https://stackabuse.com/reading-and-writing-xml-files-in-python/
import xml.etree.ElementTree as ET
import os
# .owl file to store new deprecations in
deprecated_file_path = 'imports/deprecation_import.owl';
# .owl file to look for items to be converted from foodon to ncbitaxon.
input_file_path = 'foodon-edit.owl'
output_file_path = input_file_path;
# Preserve comments in XML files:
# https://stackoverflow.com/questions/33573807/faithfully-preserve-comments-in-parsed-xml
#class CommentedTreeBuilder(ET.TreeBuilder):
# def comment(self, data):
# self.start(ET.Comment, {})
# self.data(data)
# self.end(ET.Comment)
#
#parser = ET.XMLParser(target=CommentedTreeBuilder())
#Python 3.8
#parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True))
# problem is it errors out on owl rdf/xml
# Had to dig for this code to re-establish namespace from given XML file:
# Have to fetch existing file's dictionary of prefix:uri namespaces
namespace = dict([
node for (_, node) in ET.iterparse(input_file_path, events=['start-ns'])
])
# Oddly this one can get dropped on write of file, so must add it:
namespace['xml'] = 'http://www.w3.org/XML/1998/namespace';
for prefix in namespace:
ET.register_namespace(prefix, namespace[prefix]);
tree = ET.parse(input_file_path);
root = tree.getroot();
deprecations = ET.parse(deprecated_file_path); # replaced ET.parse()
deprecation_root = deprecations.getroot();
# For working with ElementTree attributes, it seems we need to use this format of namespace:
ns = {
'owl': '{http://www.w3.org/2002/07/owl#}',
'rdfs': '{http://www.w3.org/2000/01/rdf-schema#}',
'rdf': '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}',
'obo': '{http://purl.obolibrary.org/obo/}'
}
#
# IN deprecation_root owl file, add an XML/RDF owl:Class about about_uri, with
# replaced_by link to owl_taxon_uri.
#
def deprecate_term(deprecation_root, about_uri, label, owl_taxon_uri):
# One extra check could be done to ensure duplicate deprecation not added.
obs_element = deprecation_root.makeelement('owl:Class', {'rdf:about': about_uri});
deprecated = obs_element.makeelement('owl:deprecated', {'rdf:datatype': 'http://www.w3.org/2001/XMLSchema#boolean'});
deprecated.text = 'true';
obs_element.append(deprecated);
obs_label = obs_element.makeelement('rdfs:label', {'xml:lang':'en'});
obs_label.text = 'obsolete: ' + label.text;
obs_element.append(obs_label);
# "replaced by" (IAO:0100001) taxonomy term.
replaced = obs_element.makeelement('obo:IAO_0100001', {'rdf:resource':owl_taxon_uri});
obs_element.append(replaced);
deprecation_root.append(obs_element);
rdf_resource_lookup = {};
# Create index on all owl:Class/rdfs:subClassOf[@rdf:resource tags; only needs 1 entry
for tag in root.findall('owl:Class/rdfs:subClassOf[@rdf:resource]', namespace):
rdf_resource_lookup[tag.attrib['{rdf}resource'.format(**ns)]] = [tag];
# For all classes in main ontology file, see if they are FoodOn uri's and have
# an "'in taxon' some [taxon]" axiom, and if so, convert class to be about
# [taxon], and deprecate the class's existing URI in the deprecated terms owl file
count = 0;
for owl_class in root.findall('owl:Class', namespace):
about = owl_class.get('{rdf}about'.format(**ns)); # owl_class.attrib['{rdf}about'.format(**ns)];
if (about and 'FOODON_' in about):
# ONLY DO TAXON CONVERSION IF THIS CLASS HAS NO EXPLICIT SUBCLASSES.
# PARENT CONVERSIONS MUST BE MANUALLY REVIEWED - TOO OFTEN THEY HAVE
# THEMSELVES AS A CHILD if 'in taxon' Y is too general.
if about in rdf_resource_lookup:
continue;
# Here we're only dealing with a leaf (no subclasses)
for owl_subclassof in owl_class.findall('rdfs:subClassOf', namespace):
for owl_restriction in owl_subclassof.findall('owl:Restriction', namespace):
# Find 'in taxon'
owl_property = owl_restriction.find('owl:onProperty[@rdf:resource = "http://purl.obolibrary.org/obo/RO_0002162"]', namespace)
if owl_property != None:
owl_taxon = owl_restriction.find('owl:someValuesFrom[@rdf:resource]', namespace);
if owl_taxon != None:
owl_taxon_uri = owl_taxon.attrib['{rdf}resource'.format(**ns)];
label = owl_class.find('rdfs:label[@xml:lang="en"]', namespace);
if label != None:
# Not converting items that are animal /human as consumer
if label.text.find('consumer') != -1:
print ("Skipping consumer ", label.text);
continue;
alt_term = owl_class.makeelement('obo:IAO_0000118', {'xml:lang': 'en'});
alt_term.text = label.text;
owl_class.append(alt_term);
owl_class.remove(label);
# HERE WE MAKE CHANGES
# FoodOn plant and animal organism may have duplicate dbxref to taxon:
taxon_xref = owl_class.find('oboInOwl:hasDbXref[@rdf:resource = "'+owl_taxon_uri+'"]', namespace)
if taxon_xref != None:
#print ('found dbxref')
owl_class.remove(taxon_xref);
# Remove existing rdf:about and add new one to class:
owl_class.attrib.pop('{rdf}about'.format(**ns), None)
owl_class.set('rdf:about', owl_taxon_uri);
# Remove 'in taxon' some NCBITaxon axiom
owl_class.remove(owl_subclassof);
# Prepare the obsoleted FoodOn class
deprecate_term(deprecation_root, about, label, owl_taxon_uri);
count += 1;
else:
print ("Skipped ", about, "as it has multiple taxa expression");
print ('Processed', count , 'taxa conversions.');
# 2nd pass eliminates synonomy tags and IAO:0000118 alternate term tags that match rdfs:label
for owl_class in root.findall('owl:Class', namespace):
label_node = owl_class.find('rdfs:label[@xml:lang="en"]', namespace);
if label_node != None:
label = label_node.text.lower();
# Housecleaning: get rid of synonyms that match label
# See https://docs.python.org/2/library/xml.etree.elementtree.html
for synonymy in ['oboInOwl:hasSynonym','oboInOwl:hasExactSynonym', 'obo:IAO_0000118']:
for synonym in owl_class.findall(synonymy, namespace):
if not synonym.text:
# Sometimes synonym has URI by accident instead of text
print ("Error in ", synonymy, "in",label);
pass
elif synonym.text.lower() == label:
print ("Found duplicate", synonymy, label);
owl_class.remove(synonym);
if (count > 0):
tree.write(output_file_path, xml_declaration=True, encoding='utf-8', method="xml", );
cmd = f'robot reduce -i {output_file_path} -r ELK -o {output_file_path}' # Note no --xml-entities
os.system(cmd)
deprecations.write(deprecated_file_path, xml_declaration=True, encoding='utf-8', method="xml");
cmd = f'robot reduce -i {deprecated_file_path} -r ELK -o {deprecated_file_path}' # Note no --xml-entities
os.system(cmd)
|
b399e7618f08382e31771816010e0fc8c61cbccf
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/mongo/datadog_checks/mongo/collectors/db_stat.py
|
86451b428f78cde7e7faebb9b4166b2e521f1858
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
db_stat.py
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.mongo.collectors.base import MongoCollector
from datadog_checks.mongo.common import MongosDeployment, ReplicaSetDeployment
class DbStatCollector(MongoCollector):
"""Collects database statistics using the 'dbstats' mongo command. This collector can be instantiated multiple
times, once for each database to monitor.
Metrics are tagged with the database name so they don't overlap with each other.
"""
def __init__(self, check, db_name, tags):
super(DbStatCollector, self).__init__(check, tags)
self.db_name = db_name
def compatible_with(self, deployment):
# Can theoretically be run on any node as long as it contains data.
# i.e Arbiters are ruled out
if self.db_name == 'local':
if isinstance(deployment, ReplicaSetDeployment) and deployment.is_arbiter:
self.log.debug("DbStatCollector can only be run on mongod nodes, arbiter node detected.")
return False
if isinstance(deployment, MongosDeployment):
self.log.debug("DbStatCollector can only be run on mongod nodes, mongos deployment detected.")
return False
return True
else:
return deployment.is_principal()
def collect(self, api):
db = api[self.db_name]
# Submit the metric
additional_tags = [
u"cluster:db:{0}".format(self.db_name), # FIXME: 8.x, was kept for backward compatibility
u"db:{0}".format(self.db_name),
]
stats = {'stats': db.command('dbstats')}
return self._submit_payload(stats, additional_tags)
|
49044f15feda126a9d1155c96463441c788ec6ac
|
3abc1fef99ac6ce0b845a1090fae7f6875fee729
|
/src/ralph/lib/custom_fields/tests/api.py
|
cf361d9ecf336e583524dcfae66a39d113c8c3ea
|
[
"Apache-2.0"
] |
permissive
|
allegro/ralph
|
5ff9165a202e836061c99e8af20214e0d651622f
|
b4a72356f527b1f12c7babd7465d2d7fa3ffb0d3
|
refs/heads/ng
| 2023-09-02T01:13:43.672554
| 2023-09-01T09:48:38
| 2023-09-01T09:48:38
| 4,359,038
| 1,970
| 617
|
Apache-2.0
| 2023-09-01T09:44:39
| 2012-05-17T14:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 964
|
py
|
api.py
|
from django.conf.urls import include, url
from rest_framework import routers, serializers, viewsets
from ..api import (
CustomFieldsFilterBackend,
NestedCustomFieldsRouterMixin,
WithCustomFieldsSerializerMixin
)
from .models import SomeModel
class SomeModelSerializer(
WithCustomFieldsSerializerMixin, serializers.Serializer
):
class Meta:
model = SomeModel
fields = ('id', 'custom_fields', 'configuration_variables')
class SomeModelViewset(viewsets.ModelViewSet):
queryset = SomeModel.objects.prefetch_related('custom_fields')
serializer_class = SomeModelSerializer
filter_backends = (
viewsets.ModelViewSet.filter_backends + [CustomFieldsFilterBackend]
)
class CustomFieldsAPITestsRouter(
NestedCustomFieldsRouterMixin, routers.DefaultRouter
):
pass
router = CustomFieldsAPITestsRouter()
router.register(r'somemodel', SomeModelViewset)
urlpatterns = [url(r'^', include(router.urls))]
|
bddc34d7290bcdb8c71b9a45038c5eac9fbf0889
|
eda6e7b8f399dedcdb960f4b48a2134b978f8d83
|
/bnpy/mergemove/zzzdeprecated/MergePlanner.py
|
04557098a4481ac25cf8fee6ddc75c8ab2765647
|
[
"BSD-3-Clause"
] |
permissive
|
bnpy/bnpy
|
8ed61bc4fe2f0ed99e0254c11a21c27c0cee59b2
|
ffc2242427451aa6a61dcac1473c47577a5ade6f
|
refs/heads/master
| 2023-08-16T06:49:58.716279
| 2022-10-15T15:59:12
| 2022-10-15T15:59:12
| 75,731,181
| 197
| 54
|
NOASSERTION
| 2023-07-21T20:59:10
| 2016-12-06T12:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 17,920
|
py
|
MergePlanner.py
|
'''
MergePlanner.py
Contains methods necessary for advanced selection of which components to merge.
'''
import numpy as np
from collections import defaultdict
from bnpy.util import isEvenlyDivisibleFloat
import bnpy.mergemove.MergeLogger as MergeLogger
# Constant defining how far calculated ELBO gap can be from zero
# and still be considered accepted or favorable
from bnpy.mergemove.MergeMove import ELBO_GAP_ACCEPT_TOL
CountTracker = defaultdict(int)
def preselectPairs(curModel, SS, lapFrac,
mergePairSelection='wholeELBO',
prevScoreMat=None,
mergeScoreRefreshInterval=10,
mergeMaxDegree=5, **kwargs):
''' Create list of candidate pairs for merge
'''
needRefresh = isEvenlyDivisibleFloat(lapFrac, mergeScoreRefreshInterval)
if prevScoreMat is None or needRefresh:
ScoreMat = np.zeros((SS.K, SS.K))
doAllPairs = 1
else:
assert prevScoreMat.shape[0] == SS.K
ScoreMat = prevScoreMat
doAllPairs = 0
ScoreMat = updateScoreMat_wholeELBO(ScoreMat, curModel, SS, doAllPairs)
posMask = ScoreMat > - ELBO_GAP_ACCEPT_TOL
Nvec = SS.getCountVec()
tinyVec = Nvec < 25
tinyMask = np.add(tinyVec, tinyVec[:, np.newaxis])
posAndTiny = np.logical_and(posMask, tinyMask)
posAndBothBig = np.logical_and(posMask, 1 - tinyMask)
# Select list of pairs to track for merge
# prioritizes merges that make big changes
# avoids tracking too many pairs that involves same node
pairsBig = selectPairsUsingAtMostNOfEachComp(posAndBothBig,
N=mergeMaxDegree)
scoresBig = np.asarray([ScoreMat[a, b] for (a, b) in pairsBig])
pairsBig = [pairsBig[x] for x in np.argsort(-1 * scoresBig)]
pairsTiny = selectPairsUsingAtMostNOfEachComp(posAndTiny, pairsBig,
N=mergeMaxDegree,
Nextra=2)
scoresTiny = np.asarray([ScoreMat[a, b] for (a, b) in pairsTiny])
pairsTiny = [pairsTiny[x] for x in np.argsort(-1 * scoresTiny)]
return pairsBig + pairsTiny, ScoreMat
def calcDegreeFromEdgeList(pairIDs, nNode):
''' Calculate degree of each node given edge list
Returns
-------
degree : 1D array, size nNode
degree[k] counts number of edges that node k appears in
'''
degree = np.zeros(nNode, dtype=np.int32)
for n in range(nNode):
degree[n] = np.sum([n in pair for pair in pairIDs])
return degree
def selectPairsUsingAtMostNOfEachComp(AdjMat, extraFixedEdges=None,
N=3, Nextra=0):
'''
Args
--------
AdjMat : 2D array, size K x K
N : max degree of each node
Returns
--------
pairIDs : list of tuples, one entry per selected pair
'''
if np.sum(AdjMat) == 0:
return list()
# AMat :
# tracks all remaining CANDIDATE edges where both nodes are under the
# degree limit.
AMat = AdjMat.copy()
xdegree = np.zeros(AdjMat.shape[0], dtype=np.int32)
if extraFixedEdges is not None:
for kA, kB in extraFixedEdges:
xdegree[kA] += 1
xdegree[kB] += 1
# degree : tracks CANDIDATE edges (including extra) that are not excluded
# newdegree : tracks edges we will KEEP
newdegree = np.zeros_like(xdegree)
newdegree += xdegree
exhaustedMask = newdegree >= N
AMat[exhaustedMask, :] = 0
AMat[:, exhaustedMask] = 0
degree = np.sum(AMat, axis=0) + np.sum(AMat, axis=1) + xdegree
# Traverse comps from largest to smallest degree
pairIDs = list()
nodeOrder = np.argsort(-1 * degree)
for nodeID in nodeOrder:
# Get list of remaining possible partners for node
partners = np.flatnonzero(AMat[nodeID, :] + AMat[:, nodeID])
# Sort node's partners from smallest to largest degree,
# since we want to prioritize keeping small degree partners
partners = partners[np.argsort([degree[p] for p in partners])]
Ncur = N - newdegree[nodeID]
keepPartners = partners[:Ncur]
rejectPartners = partners[Ncur:]
for p in keepPartners:
kA = np.minimum(p, nodeID)
kB = np.maximum(p, nodeID)
pairIDs.append((kA, kB))
AMat[kA, kB] = 0 # make pair ineligible for future partnerships
newdegree[p] += 1
newdegree[nodeID] += 1
for p in rejectPartners:
kA = np.minimum(p, nodeID)
kB = np.maximum(p, nodeID)
AMat[kA, kB] = 0 # make pair ineligible for future partnerships
degree[p] -= 1
degree[nodeID] -= 1
exhaustedMask = newdegree >= N
AMat[exhaustedMask, :] = 0
AMat[:, exhaustedMask] = 0
degree = np.sum(AMat, axis=0) + np.sum(AMat, axis=1) + xdegree
cond1 = np.allclose(degree, xdegree)
cond2 = np.max(newdegree) <= N + Nextra
if not cond1:
print('WARNING: BAD DEGREE CALCULATION')
if not cond2:
print('WARNING: BAD NEWDEGREE CALCULATION')
print('max(newdegree)=%d' % (np.max(newdegree)))
print('N + Nextra: %d' % (N + Nextra))
return pairIDs
def updateScoreMat_wholeELBO(ScoreMat, curModel, SS, doAllPairs=0):
''' Calculate upper-tri matrix of exact ELBO gap for each candidate pair
Returns
---------
Mraw : 2D array, size K x K. Upper tri entries carry content.
Mraw[j,k] gives the scalar ELBO gap for the potential merge of j,k
'''
K = SS.K
if doAllPairs:
AGap = curModel.allocModel.calcHardMergeGap_AllPairs(SS)
OGap = curModel.obsModel.calcHardMergeGap_AllPairs(SS)
ScoreMat = AGap + OGap
ScoreMat[np.tril_indices(SS.K)] = -np.inf
for k, uID in enumerate(SS.uIDs):
CountTracker[uID] = SS.getCountVec()[k]
nUpdated = SS.K * (SS.K - 1) / 2
else:
ScoreMat[np.tril_indices(SS.K)] = -np.inf
# Rescore only specific pairs that are positive
redoMask = ScoreMat > -1 * ELBO_GAP_ACCEPT_TOL
for k, uID in enumerate(SS.uIDs):
if CountTracker[uID] == 0:
# Always precompute for brand-new comps
redoMask[k, :] = 1
redoMask[:, k] = 1
else:
absDiff = np.abs(SS.getCountVec()[k] - CountTracker[uID])
percDiff = absDiff / (CountTracker[uID] + 1e-10)
if percDiff > 0.25:
redoMask[k, :] = 1
redoMask[:, k] = 1
CountTracker[uID] = SS.getCountVec()[k]
redoMask[np.tril_indices(SS.K)] = 0
aList, bList = np.unravel_index(np.flatnonzero(redoMask), (SS.K, SS.K))
if len(aList) > 0:
mPairIDs = list(zip(aList, bList))
AGap = curModel.allocModel.calcHardMergeGap_SpecificPairs(
SS, mPairIDs)
OGap = curModel.obsModel.calcHardMergeGap_SpecificPairs(
SS, mPairIDs)
ScoreMat[aList, bList] = AGap + OGap
nUpdated = len(aList)
MergeLogger.log('MERGE ScoreMat Updates: %d entries.' % (nUpdated),
level='debug')
return ScoreMat
def preselect_candidate_pairs(curModel, SS,
randstate=np.random.RandomState(0),
mergePairSelection='random',
mergePerLap=10,
doLimitNumPairs=1,
M=None,
**kwargs):
''' Get a list of tuples representing candidate pairs to merge.
Args
--------
curModel : bnpy HModel
SS : bnpy SuffStatBag. If None, defaults to random selection.
randstate : numpy random number generator
mergePairSelection : name of procedure to select candidate pairs
mergePerLap : int number of candidates to identify
(may be less if K small)
Returns
--------
mPairList : list of tuples
each entry is a tuple of two integers
indicating component ID candidates for positions kA, kB
'''
kwargs['mergePairSelection'] = mergePairSelection
kwargs['randstate'] = randstate
if 'excludePairs' not in kwargs:
excludePairs = list()
else:
excludePairs = kwargs['excludePairs']
K = curModel.allocModel.K
if doLimitNumPairs:
nMergeTrials = mergePerLap + kwargs['mergeNumExtraCandidates']
else:
nMergeTrials = K * (K - 1) // 2
if SS is None: # Handle first lap
kwargs['mergePairSelection'] = 'random'
Mraw = None
# Score matrix
# M : 2D array, shape K x K
# M[j,k] = score for viability of j,k. Larger = better.
selectroutine = kwargs['mergePairSelection']
if kwargs['mergePairSelection'].count('random') > 0:
M = kwargs['randstate'].rand(K, K)
elif kwargs['mergePairSelection'].count('marglik') > 0:
M = calcScoreMatrix_marglik(curModel, SS, excludePairs)
elif kwargs['mergePairSelection'].count('wholeELBO') > 0:
M, Mraw = calcScoreMatrix_wholeELBO(curModel, SS, excludePairs, M=M)
elif kwargs['mergePairSelection'].count('corr') > 0:
# Use correlation matrix as score for selecting candidates!
if selectroutine.count('empty') > 0:
M = calcScoreMatrix_corrOrEmpty(SS)
elif selectroutine.count('degree') > 0:
M = calcScoreMatrix_corrLimitDegree(SS)
else:
M = calcScoreMatrix_corr(SS)
else:
raise NotImplementedError(kwargs['mergePairSelection'])
# Only upper-triangular indices are allowed.
M[np.tril_indices(K)] = 0
# Excluded pairs are not allowed.
M[list(zip(*excludePairs))] = 0
# Select candidates
aList, bList = _scorematrix2rankedlist_greedy(M, nMergeTrials)
# Return completed lists
assert len(aList) == len(bList)
assert len(aList) <= nMergeTrials
assert len(aList) <= K * (K - 1) // 2
assert np.all(np.asarray(aList) < np.asarray(bList))
if 'returnScoreMatrix' in kwargs and kwargs['returnScoreMatrix']:
if Mraw is None:
return list(zip(aList, bList)), M
else:
return list(zip(aList, bList)), Mraw
return list(zip(aList, bList))
def _scorematrix2rankedlist_greedy(M, nPairs, doKeepZeros=False):
''' Return the nPairs highest-ranked pairs in score matrix M
Args
-------
M : score matrix, K x K
should have only entries kA,kB where kA <= kB
Returns
--------
aList : list of integer ids for rows of M
bList : list of integer ids for cols of M
Example
---------
_scorematrix2rankedlist( [0 2 3], [0 0 1], [0 0 0], 3)
>> [ (0,2), (0,1), (1,2)]
'''
M = M.copy()
M[np.tril_indices(M.shape[0])] = - np.inf
Mflat = M.flatten()
sortIDs = np.argsort(-1 * Mflat)
# Remove any entries that are -Inf
sortIDs = sortIDs[Mflat[sortIDs] != -np.inf]
if not doKeepZeros:
# Remove any entries that are zero
sortIDs = sortIDs[Mflat[sortIDs] != 0]
bestrs, bestcs = np.unravel_index(sortIDs, M.shape)
return bestrs[:nPairs].tolist(), bestcs[:nPairs].tolist()
def calcScoreMatrix_wholeELBO(curModel, SS, excludePairs=list(), M=None):
''' Calculate upper-tri matrix of exact ELBO gap for each candidate pair
Returns
---------
M : 2D array, size K x K. Upper triangular entries carry the content.
M[j,k] is positive iff merging j,k improves the ELBO
0 otherwise
Mraw : 2D array, size K x K. Upper tri entries carry content.
Mraw[j,k] gives the scalar ELBO gap for the potential merge of j,k
'''
K = SS.K
if M is None:
AGap = curModel.allocModel.calcHardMergeGap_AllPairs(SS)
OGap = curModel.obsModel.calcHardMergeGap_AllPairs(SS)
Mraw = AGap + OGap
nUpdated = (SS.K * (SS.K - 1)) / 2
else:
assert M.shape[0] == K
assert M.shape[1] == K
nZeroEntry = np.sum(M == 0) - K - K * (K - 1) // 2
assert nZeroEntry >= 0
aList, bList = _scorematrix2rankedlist_greedy(M, SS.K + nZeroEntry,
doKeepZeros=True)
pairList = list(zip(aList, bList))
AGap = curModel.allocModel.calcHardMergeGap_SpecificPairs(SS, pairList)
OGap = curModel.obsModel.calcHardMergeGap_SpecificPairs(SS, pairList)
M[aList, bList] = AGap + OGap
Mraw = M
nUpdated = len(pairList)
MergeLogger.log('MERGE ScoreMat Updates: %d entries.' % (nUpdated),
level='debug')
Mraw[np.triu_indices(K, 1)] += ELBO_GAP_ACCEPT_TOL
M = Mraw.copy()
M[M < 0] = 0
return M, Mraw
def calcScoreMatrix_corr(SS, MINCORR=0.05, MINVAL=1e-8):
''' Calculate Score matrix using correlation cues.
Returns
-------
CorrMat : 2D array, size K x K
CorrMat[j,k] = correlation coef for comps j,k
'''
K = SS.K
Smat = SS.getSelectionTerm('DocTopicPairMat')
svec = SS.getSelectionTerm('DocTopicSum')
nanIDs = np.isnan(Smat)
Smat[nanIDs] = 0
svec[np.isnan(svec)] = 0
offlimitcompIDs = np.logical_or(np.isnan(svec), svec < MINVAL)
CovMat = Smat / SS.nDoc - np.outer(svec / SS.nDoc, svec / SS.nDoc)
varc = np.diag(CovMat)
sqrtc = np.sqrt(varc)
sqrtc[offlimitcompIDs] = MINVAL
assert sqrtc.min() >= MINVAL
CorrMat = CovMat / np.outer(sqrtc, sqrtc)
# Now, filter to leave only *positive* entries in upper diagonal
# we shouldn't even bother trying to merge topics
# with negative or nearly zero correlations
CorrMat[np.tril_indices(K)] = 0
CorrMat[CorrMat < MINCORR] = 0
CorrMat[nanIDs] = 0
return CorrMat
def calcScoreMatrix_corrLimitDegree(SS, MINCORR=0.05, N=3):
''' Score candidate merge pairs favoring correlations.
Returns
-------
M : 2D array, size K x K
M[j,k] provides score in [0, 1] for each pair of comps (j,k)
larger score indicates better candidate for merge
'''
M = calcScoreMatrix_corr(SS)
thrvec = np.linspace(MINCORR, 1.0, 10)
fixedPairIDs = list()
for tt in range(thrvec.size - 1, 0, -1):
thrSm = thrvec[tt - 1]
thrBig = thrvec[tt]
A = np.logical_and(M > thrSm, M < thrBig)
pairIDs = selectPairsUsingAtMostNOfEachComp(A, fixedPairIDs, N=N)
fixedPairIDs = fixedPairIDs + pairIDs
Mlimit = np.zeros_like(M)
if len(fixedPairIDs) == 0:
return Mlimit
x, y = list(zip(*fixedPairIDs))
Mlimit[x, y] = M[x, y]
return Mlimit
def calcScoreMatrix_corrOrEmpty(SS, EMPTYTHR=100):
''' Score candidate merge pairs favoring correlations or empty components
Returns
-------
M : 2D array, size K x K
M[j,k] provides score in [0, 1] for each pair of comps (j,k)
larger score indicates better candidate for merge
'''
# 1) Use correlation scores
M = calcScoreMatrix_corr(SS)
# 2) Add in pairs of (large mass, small mass)
Nvec = None
if hasattr(SS, 'N'):
Nvec = SS.N
elif hasattr(SS, 'SumWordCounts'):
Nvec = SS.SumWordCounts
assert Nvec is not None
sortIDs = np.argsort(Nvec)
emptyScores = np.zeros(SS.K)
for ii in range(SS.K // 2):
worstID = sortIDs[ii]
bestID = sortIDs[-(ii + 1)]
if Nvec[worstID] < EMPTYTHR and Nvec[bestID] > EMPTYTHR:
# Want to prefer trying *larger* comps before smaller ones
# So boost the score of larger comps slightly
M[worstID, bestID] = 0.5 + 0.1 * Nvec[worstID] / Nvec.sum()
M[bestID, worstID] = 0.5 + 0.1 * Nvec[worstID] / Nvec.sum()
if Nvec[worstID] > EMPTYTHR:
break
emptyScores[worstID] = Nvec[worstID] / Nvec.sum()
# 3) Add in pairs of (small mass, small mass)
emptyIDs = np.flatnonzero(emptyScores)
nEmpty = emptyIDs.size
for jID in range(nEmpty - 1):
for kID in range(jID + 1, nEmpty):
j = emptyIDs[jID]
k = emptyIDs[kID]
M[j, k] = 0.4 + 0.1 * (emptyScores[j] + emptyScores[k])
return M
def calcScoreMatrix_marglik(curModel, SS, excludePairs):
K = SS.K
M = np.zeros((K, K))
excludeSet = set(excludePairs)
myCalculator = MargLikScoreCalculator()
for kA in range(K):
for kB in range(kA + 1, K):
if (kA, kB) not in excludeSet:
M[kA, kB] = myCalculator._calcMScoreForCandidatePair(
curModel, SS, kA, kB)
return M
class MargLikScoreCalculator(object):
''' Calculate marglik scores quickly by caching
'''
def __init__(self):
self.MScores = dict()
self.PairMScores = dict()
def _calcMScoreForCandidatePair(self, hmodel, SS, kA, kB):
logmA = self._calcLogMargLikForComp(hmodel, SS, kA)
logmB = self._calcLogMargLikForComp(hmodel, SS, kB)
logmAB = self._calcLogMargLikForPair(hmodel, SS, kA, kB)
return logmAB - logmA - logmB
def _calcLogMargLikForComp(self, hmodel, SS, kA):
if kA in self.MScores:
return self.MScores[kA]
mA = hmodel.obsModel.calcLogMargLikForComp(
SS, kA, doNormConstOnly=True)
self.MScores[kA] = mA
return mA
def _calcLogMargLikForPair(self, hmodel, SS, kA, kB):
if (kA, kB) in self.PairMScores:
return self.PairMScores[(kA, kB)]
elif (kB, kA) in self.PairMScores:
return self.PairMScores[(kB, kA)]
else:
mAB = hmodel.obsModel.calcLogMargLikForComp(
SS, kA, kB, doNormConstOnly=True)
self.PairMScores[(kA, kB)] = mAB
return mAB
|
ca92dd38111094f5a06ad0cbea0b20d7257ac4a2
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/st/ops/ascend/cube/test_depthwise_ad_001.py
|
49718ce50e6d110bf60f9ef8d9c76bb4c987bcad
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,525
|
py
|
test_depthwise_ad_001.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
depthwise ad test case
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.depthwise_ad_run import depthwise_ad_run
class TestCase(TestBase):
def setup(self):
case_name = "test_autodiff_depthwise_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag, opfuncname, testRunArgs
# CO = CI
# group = CI // block_size
# N, H, CI, K, PAD, STRIDE, cutH, cutCo, cutM, cutK, cutN
("depthwise_ad_run_001", depthwise_ad_run, (16, 7, 7, 1024, 1, 3, 3, 1, 1, 1, 1, 7, 16, 512, 3 * 16, 16)),
]
self.testarg1 = [
("depthwise_ad_run_101", depthwise_ad_run, (16, 7, 7, 960, 1, 3, 3, 1, 1, 1, 1, 9, 16, 512, 3 * 16, 16)),
]
return
@pytest.mark.skip
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
"""
Run the case.
:return:
"""
self.common_run(self.testarg)
@pytest.mark.skip
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run1(self):
"""
Run the case.
:return:
"""
self.common_run(self.testarg1)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= {0} Teardown============".format(self.casename))
return
if __name__ == "__main__":
#a = TestCase("test_depthwise_ad_001", os.getcwd())
a = TestCase()
a.setup()
a.test_run()
a.teardown()
|
724d0ab96f0b973937e6723ab78f90059f33d139
|
d08cf46d3e16ab8e6a958731168469ba38daf069
|
/shenfun/matrixbase.py
|
d5fbaed47d69e4f308a28b4ffa3c19174c54f940
|
[
"BSD-2-Clause"
] |
permissive
|
spectralDNS/shenfun
|
ce808edc5258c896f2cccfbd88e67153e3f621c9
|
bcda39d8d8e4741df1cafe719d81733cc1024def
|
refs/heads/master
| 2023-07-27T20:29:57.075970
| 2023-07-11T12:33:04
| 2023-07-11T12:33:04
| 79,914,066
| 190
| 46
|
BSD-2-Clause
| 2022-05-11T19:10:33
| 2017-01-24T13:29:02
|
Python
|
UTF-8
|
Python
| false
| false
| 81,148
|
py
|
matrixbase.py
|
r"""
This module contains classes for working with sparse matrices.
"""
from __future__ import division
import functools
from copy import copy, deepcopy
from collections.abc import Mapping, MutableMapping
from collections import defaultdict
from numbers import Number
import numpy as np
import sympy as sp
from scipy.sparse import bmat, spmatrix, dia_matrix, csr_matrix, kron, \
diags as sp_diags
from scipy.integrate import quadrature
from mpi4py import MPI
from shenfun.config import config
from .utilities import integrate_sympy
__all__ = ['SparseMatrix', 'SpectralMatrix', 'extract_diagonal_matrix',
'extract_bc_matrices', 'check_sanity', 'assemble_sympy',
'TPMatrix', 'BlockMatrix', 'BlockMatrices', 'Identity',
'get_simplified_tpmatrices', 'ScipyMatrix', 'SpectralMatDict']
comm = MPI.COMM_WORLD
class SparseMatrix(MutableMapping):
r"""Base class for sparse matrices.
The data is stored as a dictionary, where keys and values are, respectively,
the offsets and values of the diagonals. In addition, each matrix is stored
with a coefficient that is used as a scalar multiple of the matrix.
Parameters
----------
d : dict
Dictionary, where keys are the diagonal offsets and values the
diagonals
shape : two-tuple of ints
scale : number, optional
Scale matrix with this number
Note
----
The matrix format and storage are similar to Scipy's `dia_matrix`. The format is
chosen because spectral matrices often are computed by hand and presented
in the literature as banded matrices.
Note that a SparseMatrix can easily be transformed to any of Scipy's formats
using the `diags` method. However, Scipy's matrices are not implemented to
act along different axes of multidimensional arrays, which is required
for tensor product matrices, see :class:`.TPMatrix`. Hence the need for
this SparseMatrix class.
Examples
--------
A tridiagonal matrix of shape N x N could be created as
>>> from shenfun import SparseMatrix
>>> import numpy as np
>>> N = 4
>>> d = {-1: 1, 0: -2, 1: 1}
>>> S = SparseMatrix(d, (N, N))
>>> dict(S)
{-1: 1, 0: -2, 1: 1}
In case of variable values, store the entire diagonal. For an N x N
matrix use
>>> d = {-1: np.ones(N-1),
... 0: -2*np.ones(N),
... 1: np.ones(N-1)}
>>> S = SparseMatrix(d, (N, N))
>>> dict(S)
{-1: array([1., 1., 1.]), 0: array([-2., -2., -2., -2.]), 1: array([1., 1., 1.])}
"""
# pylint: disable=redefined-builtin, missing-docstring
def __init__(self, d, shape, scale=1):
# sort d before storing
sorted_dict = sorted(d.items())
self._storage = {si[0]: si[1] for si in sorted_dict}
self.shape = shape
self._diags = dia_matrix((1, 1))
self.scale = scale
self._matvec_methods = []
self.solver = None
def matvec(self, v, c, format=None, axis=0):
"""Matrix vector product
Returns c = dot(self, v)
Parameters
----------
v : array
Numpy input array of ndim>=1
c : array
Numpy output array of same shape as v
format : str, optional
Choice for computation
- csr - Compressed sparse row format
- dia - Sparse matrix with DIAgonal storage
- python - Use numpy and vectorization
- self - To be implemented in subclass
- cython - Cython implementation that may be implemented in subclass
- numba - Numba implementation that may be implemented in subclass
Using ``config['matrix']['sparse']['matvec']`` setting if format is None
axis : int, optional
The axis over which to take the matrix vector product
"""
format = config['matrix']['sparse']['matvec'] if format is None else format
N, M = self.shape
c.fill(0)
# Roll relevant axis to first
if axis > 0:
v = np.moveaxis(v, axis, 0)
c = np.moveaxis(c, axis, 0)
if format == 'python':
for key, val in self.items():
if np.ndim(val) > 0: # broadcasting
val = val[(slice(None), ) + (np.newaxis,)*(v.ndim-1)]
if key < 0:
c[-key:min(N, M-key)] += val*v[:min(M, N+key)]
else:
c[:min(N, M-key)] += val*v[key:min(M, N+key)]
c *= self.scale
else:
diags = self.diags(format=format)
P = int(np.prod(v.shape[1:]))
y = diags.dot(v[:M].reshape(M, P)).squeeze()
d = tuple([slice(0, m) for m in y.shape])
c[d] = y.reshape(c[d].shape)
if axis > 0:
c = np.moveaxis(c, 0, axis)
v = np.moveaxis(v, 0, axis)
return c
def diags(self, format=None, scaled=True):
"""Return a regular sparse matrix of specified format
Parameters
----------
format : str, optional
Choice of matrix type (see scipy.sparse.diags)
- dia - Sparse matrix with DIAgonal storage
- csr - Compressed sparse row
- csc - Compressed sparse column
Using ``config['matrix']['sparse']['diags']`` setting if format is None
scaled : bool, optional
Return matrix scaled by the constant self.scale if True
Note
----
This method returns the matrix scaled by self.scale if keyword scaled
is True.
"""
format = config['matrix']['sparse']['diags'] if format is None else format
self.sort()
self._diags = sp_diags(list(self.values()),
list(self.keys()),
shape=self.shape, format=format)
scale = self.scale
if isinstance(scale, np.ndarray):
scale = np.atleast_1d(scale).item()
return self._diags*scale if scaled else self._diags
def sort(self):
self._storage = {si[0]: si[1] for si in sorted(self.items())}
def __getitem__(self, key):
v = self._storage[key]
if hasattr(v, '__call__'):
return v(key)
return v
def __delitem__(self, key):
del self._storage[key]
def __setitem__(self, key, val):
self._storage[key] = val
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __eq__(self, a):
if self.shape != a.shape:
return False
if not self.same_keys(a):
return False
d0 = self.diags('csr', False).data
a0 = a.diags('csr', False).data
if d0.shape[0] != a0.shape[0]:
return False
if not np.linalg.norm(d0-a0) < 1e-8:
return False
return True
def __neq__(self, a):
return not self.__eq__(a)
def __imul__(self, y):
"""self.__imul__(y) <==> self*=y"""
assert isinstance(y, Number)
self.scale *= y
return self
def __mul__(self, y):
"""Returns copy of self.__mul__(y) <==> self*y"""
if isinstance(y, Number):
c = self.copy()
c.scale *= y
return c
elif isinstance(y, np.ndarray):
c = np.empty_like(y)
c = self.matvec(y, c)
return c
elif isinstance(y, SparseMatrix):
return self.diags('csc')*y.diags('csc')
raise RuntimeError
def __rmul__(self, y):
"""Returns copy of self.__rmul__(y) <==> y*self"""
return self.__mul__(y)
def __div__(self, y):
"""Returns elementwise division if `y` is a Number, or a linear algebra
solve if `y` is an array.
Parameters
----------
y : Number or array
"""
if isinstance(y, Number):
assert abs(y) > 1e-8
c = self.copy()
c.scale /= y
return c
elif isinstance(y, np.ndarray):
b = np.zeros_like(y)
b = self.solve(y, b)
return b
else:
raise NotImplementedError
def __truediv__(self, y):
"""Returns copy self.__div__(y) <==> self/y"""
return self.__div__(y)
def __add__(self, d):
"""Return copy of self.__add__(y) <==> self+d"""
if abs(self.scale) < 1e-15 and abs(d.scale) < 1e-15:
f = SparseMatrix({0: 0}, self.shape)
elif abs(self.scale) < 1e-15:
f = SparseMatrix(deepcopy(dict(d)), d.shape, d.scale)
elif abs(d.scale) < 1e-15:
f = self.copy()
else:
assert isinstance(d, Mapping)
f = SparseMatrix(deepcopy(dict(self)), self.shape, self.scale)
f.incorporate_scale()
d.incorporate_scale()
for key, val in d.items():
if key in f:
f[key] = f[key] + val
else:
f[key] = val
return f
def __iadd__(self, d):
"""self.__iadd__(d) <==> self += d"""
assert isinstance(d, Mapping)
assert d.shape == self.shape
if abs(d.scale) < 1e-16:
return self
elif abs(self.scale) < 1e-16:
self.clear()
for key, val in d.items():
self[key] = val
self.scale = d.scale
return self
self.incorporate_scale()
d.incorporate_scale()
for key, val in d.items():
if key in self:
self[key] = self[key] + val
else:
self[key] = val
return self
def __sub__(self, d):
"""Return copy of self.__sub__(d) <==> self-d"""
assert isinstance(d, Mapping)
if abs(self.scale) < 1e-15 and abs(d.scale) < 1e-15:
f = SparseMatrix({0: 0}, self.shape)
elif abs(self.scale) < 1e-15:
f = SparseMatrix(deepcopy(dict(d)), d.shape, -d.scale)
elif abs(d.scale) < 1e-15:
f = self.copy()
else:
f = SparseMatrix(deepcopy(dict(self)), self.shape, self.scale)
f.incorporate_scale()
d.incorporate_scale()
for key, val in d.items():
if key in f:
f[key] = f[key] - val
else:
f[key] = -val
return f
def __isub__(self, d):
"""self.__isub__(d) <==> self -= d"""
assert isinstance(d, Mapping)
assert d.shape == self.shape
if abs(d.scale) < 1e-16:
return self
elif abs(self.scale) < 1e-16:
self.clear()
for key, val in d.items():
self[key] = val
self.scale = -d.scale
return self
self.incorporate_scale()
d.incorporate_scale()
for key, val in d.items():
if key in self:
self[key] = self[key] - val
else:
self[key] = -val
return self
def copy(self):
"""Return SparseMatrix deep copy of self"""
return self.__deepcopy__()
def __copy__(self):
if self.__class__.__name__ == 'Identity':
return self
return SparseMatrix(copy(dict(self)), self.shape, self.scale)
def __deepcopy__(self, memo=None, _nil=[]):
if self.__class__.__name__ == 'Identity':
return Identity(self.shape, self.scale)
return SparseMatrix(deepcopy(dict(self)), self.shape, self.scale)
def __neg__(self):
"""self.__neg__() <==> -self"""
A = self.copy()
A.scale = self.scale*-1
return A
def __hash__(self):
return hash(frozenset(self))
def get_key(self):
return self.__hash__()
def same_keys(self, a):
return self.__hash__() == a.__hash__()
def scale_array(self, c, sc):
assert isinstance(sc, Number)
if abs(sc-1) > 1e-8:
c *= sc
def incorporate_scale(self):
"""Modifies matrix such that self.scale = 1"""
if abs(self.scale-1) < 1e-8:
return
if hasattr(self, '_keyscale'):
self._keyscale *= self.scale
else:
for key, val in self.items():
self[key] = val*self.scale
self.scale = 1
def sorted_keys(self):
return np.sort(np.array(list(self.keys())))
def solve(self, b, u=None, axis=0, constraints=()):
"""Solve matrix system Au = b
where A is the current matrix (self)
Parameters
----------
b : array
Array of right hand side on entry and solution on exit unless
u is provided.
u : array, optional
Output array
axis : int, optional
The axis over which to solve for if b and u are multi-
dimensional
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
The constraint indents the matrix row and sets b[row] = val
Note
----
Vectors may be one- or multidimensional.
"""
if self.solver is None:
self.solver = self.get_solver()(self)
u = self.solver(b, u=u, axis=axis, constraints=constraints)
return u
def get_solver(self):
"""Return appropriate solver for self
Note
----
Fall back on generic Solve, which is using Scipy sparse
matrices with splu/spsolve. This is still pretty fast.
"""
from .la import (Solve, TDMA, TDMA_O, FDMA, TwoDMA, ThreeDMA, PDMA,
DiagMA, HeptaDMA)
if len(self) == 1:
if list(self.keys())[0] == 0:
return DiagMA
elif len(self) == 2:
if np.all(self.sorted_keys() == (0, 2)):
return TwoDMA
elif len(self) == 3:
if np.all(self.sorted_keys() == (-2, 0, 2)):
return TDMA
elif np.all(self.sorted_keys() == (-1, 0, 1)) and self.issymmetric:
return TDMA_O
elif np.all(self.sorted_keys() == (0, 2, 4)):
return ThreeDMA
elif len(self) == 4:
if np.all(self.sorted_keys() == (-2, 0, 2, 4)):
return FDMA
elif len(self) == 5:
if np.all(self.sorted_keys() == (-4, -2, 0, 2, 4)):
return PDMA
elif len(self) == 7:
if np.all(self.sorted_keys() == (-4, -2, 0, 2, 4, 6, 8)):
return HeptaDMA
return Solve
def isdiagonal(self):
if len(self) == 1:
if 0 in self:
return True
return False
def isidentity(self):
if not len(self) == 1:
return False
if 0 not in self:
return False
d = self[0]
if np.all(d == 1):
return True
return False
@property
def issymmetric(self):
#M = self.diags()
#return (abs(M-M.T) > 1e-8).nnz == 0 # too expensive
if np.sum(np.array(list(self.keys()))) != 0:
return False
for key, val in self.items():
if key <= 0:
continue
if not np.all(abs(val-self[-key]) < 1e-16):
return False
return True
def simplify_diagonal_matrices(self):
if self.isdiagonal():
self.scale = self.scale*self[0]
self[0] = 1
def clean_diagonals(self, reltol=1e-8):
"""Eliminate essentially zerovalued diagonals
Parameters
----------
reltol : number
Relative tolerance
"""
a = self * np.ones(self.shape[1])
relmax = abs(a).max() / self.shape[1]
if relmax == 0:
relmax = 1
list_keys = []
for key, val in self.items():
if abs(np.linalg.norm(val))/relmax < reltol:
list_keys.append(key)
for key in list_keys:
del self[key]
return self
def is_bc_matrix(self):
return False
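

# --- Illustrative sketch (assumed usage, mirroring the class docstring above):
# a matrix-vector product with the tridiagonal example, using the pure-numpy
# 'python' format and the scipy-backed diags() conversion.
if __name__ == "__main__":
    _S = SparseMatrix({-1: 1, 0: -2, 1: 1}, (4, 4))
    _v = np.ones(4)
    _c = np.zeros(4)
    print(_S.matvec(_v, _c, format='python'))   # expected: [-1, 0, 0, -1]
    print(_S.diags('csr').toarray())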
class SpectralMatrix(SparseMatrix):
r"""Base class for inner product matrices.
Parameters
----------
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.chebyshevu.bases`
- :mod:`.ultraspherical.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
scale : number, optional
Scale matrix with this number
measure : number or Sympy expression, optional
A function of the reference coordinate.
assemble : None or str, optional
Determines how to perform the integration,
- 'quadrature' (default)
- 'exact'
- 'adaptive'
Exact and adaptive should result in the same matrix. Exact computes the
integral using `Sympy integrate <https://docs.sympy.org/latest/modules/integrals/integrals.html>`_,
whereas adaptive makes use of adaptive quadrature through `scipy <https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.quadrature.html>`_.
kind : None or str, optional
Alternative kinds of methods.
- 'implemented' - Hardcoded implementations
- 'stencil' - Use orthogonal bases and stencil-matrices
- 'vandermonde' - Use Vandermonde matrix
The default is to first try to look for implemented kind, and if that
fails try first 'stencil' and then finally fall back on vandermonde.
Vandermonde creates a dense matrix of size NxN, so it should be avoided
(e.g., by implementing the matrix) for large N.
fixed_resolution : None or str, optional
A fixed number of quadrature points used to compute the matrix.
If 'fixed_resolution' is set, then assemble is set to 'quadrature' and
kind is set to 'vandermonde'.
Examples
--------
Mass matrix for Chebyshev Dirichlet basis:
.. math::
(\phi_k, \phi_j)_w = \int_{-1}^{1} \phi_k(x) \phi_j(x) w(x) dx
Stiffness matrix for Chebyshev Dirichlet basis:
.. math::
(\phi_k'', \phi_j)_w = \int_{-1}^{1} \phi_k''(x) \phi_j(x) w(x) dx
The matrices can be automatically created using, e.g., for the mass
matrix of the Dirichlet space::
>>> from shenfun import FunctionSpace, SpectralMatrix
>>> SD = FunctionSpace(16, 'C', bc=(0, 0))
>>> M = SpectralMatrix((SD, 0), (SD, 0))
where the first (SD, 0) represents the test function and
the second the trial function. The stiffness matrix can be obtained as::
>>> A = SpectralMatrix((SD, 0), (SD, 2))
where (SD, 2) signals that we use the second derivative of this trial
function. A more natural way to do the same thing is to
>>> from shenfun import TrialFunction, TestFunction, inner, Dx
>>> u = TrialFunction(SD)
>>> v = TestFunction(SD)
>>> A = inner(v, Dx(u, 0, 2))
where :func:`Dx` is a partial (or ordinary) derivative.
"""
def __init__(self, test, trial, scale=1.0, measure=1, assemble=None,
kind=None, fixed_resolution=None):
assert isinstance(test[1], (int, np.integer))
assert isinstance(trial[1], (int, np.integer))
self.testfunction = test
self.trialfunction = trial
self.measure = measure
shape = (test[0].dim(), trial[0].dim())
if isinstance(measure, Number):
scale *= measure
self.measure = measure = 1
if assemble is None or fixed_resolution is not None:
assemble = 'quadrature'
d = {}
_assembly_method = assemble
if assemble == 'exact':
d = self.assemble(assemble) # Look for implemented exact matrix
if d is None:
d = _get_matrix(test, trial, measure, assemble=assemble)
elif assemble == 'adaptive':
d = _get_matrix(test, trial, measure, assemble=assemble)
else:
if fixed_resolution is not None:
kind = 'vandermonde'
if kind is None:
# If nothing is specified then
# 1. Check for specific implementation
# 2. Try to use stencil-method. This may fail because it does not
# cover all options, so
# 3. Fall back on Vandermonde quadrature in case the above fails.
d = self.assemble(assemble)
if d is not None:
_assembly_method += '_implemented'
if d is None:
if test[0].family() == 'fourier':
kind = 'vandermonde'
else:
if test[0].family() != trial[0].family():
kind = 'vandermonde'
elif test[0].short_name() in ('P1', 'P2', 'P3', 'P4'):
try:
d = assemble_phi(test, trial, measure)
_assembly_method += '_phi'
except AssertionError:
kind = 'vandermonde'
else:
if test[0].is_jacobi and sp.sympify(measure).is_polynomial() and not (test[0].is_orthogonal and trial[0].is_orthogonal):
d = assemble_stencil(test, trial, measure)
_assembly_method += '_stencil'
else:
kind = 'vandermonde'
if kind is not None:
# Specified method of assembly, mainly for testing
if kind == 'implemented':
d = self.assemble(assemble)
_assembly_method += '_implemented'
elif kind == 'stencil':
assert sp.sympify(measure).is_polynomial(), 'Cannot use `stencil` with non-polynomial coefficients'
if test[0].short_name() in ('P1', 'P2', 'P3', 'P4'):
d = assemble_phi(test, trial, measure)
_assembly_method += '_phi'
else:
d = assemble_stencil(test, trial, measure)
_assembly_method += '_stencil'
elif kind == 'vandermonde':
d = _get_matrix(test, trial, measure, assemble='quadrature', fixed_resolution=fixed_resolution)
_assembly_method += '_vandermonde'
if test[0].domain_factor() != 1:
scale *= float(test[0].domain_factor())**(test[1]+trial[1]-1)
SparseMatrix.__init__(self, d, shape, scale)
self._assembly_method = _assembly_method
self.incorporate_scale()
def assemble(self, method):
r"""Return diagonals of :class:`.SpectralMatrix`
Parameters
----------
method : str
Type of integration
- 'exact'
- 'quadrature'
Note
----
Subclass :class:`.SpectralMatrix` and overload this method in order
to provide a fast and accurate implementation of the matrix representing
an inner product. See the `matrix` modules in either one of
- :mod:`.legendre.matrix`
- :mod:`.chebyshev.matrix`
- :mod:`.chebyshevu.matrix`
- :mod:`.ultraspherical.matrix`
- :mod:`.fourier.matrix`
- :mod:`.laguerre.matrix`
- :mod:`.hermite.matrix`
- :mod:`.jacobi.matrix`
Example
-------
The mass matrix for Chebyshev polynomials is
.. math::
(T_j, T_i)_{\omega} = \frac{c_i \pi}{2}\delta_{ij},
where :math:`c_0=2` and :math:`c_i=1` for integer :math:`i>0`. We can
implement this as
>>> from shenfun import SpectralMatrix
>>> class Bmat(SpectralMatrix):
... def assemble(self, method):
... test, trial = self.testfunction, self.trialfunction
... ci = np.ones(test[0].N)
... ci[0] = 2
... if test[0].quad == 'GL' and method != 'exact':
... # Gauss-Lobatto quadrature inexact at highest polynomial order
... ci[-1] = 2
... return {0: ci*np.pi/2}
Here `{0: ci*np.pi/2}` is the 0'th diagonal of the matrix.
Note that `test` and `trial` are two-tuples of `(instance of :class:`.SpectralBase`, number)`,
where the number represents the number of derivatives. For the mass matrix
the number will be 0. Also note that the length of the diagonal must be
correct.
"""
return None
def matvec(self, v, c, format=None, axis=0):
u = self.trialfunction[0]
ss = [slice(None)]*len(v.shape)
ss[axis] = u.slice()
c = super(SpectralMatrix, self).matvec(v[tuple(ss)], c, format=format, axis=axis)
return c
@property
def tensorproductspace(self):
"""Return the :class:`.TensorProductSpace` this matrix has been
computed for"""
return self.testfunction[0].tensorproductspace
@property
def axis(self):
"""Return the axis of the :class:`.TensorProductSpace` this matrix is
created for"""
return self.testfunction[0].axis
def __hash__(self):
return hash(((self.testfunction[0].__class__, self.testfunction[1]),
(self.trialfunction[0].__class__, self.trialfunction[1])))
def get_key(self):
if self.__class__.__name__.endswith('mat'):
return self.__class__.__name__
return self.__hash__()
def __eq__(self, a):
if isinstance(a, Number):
return False
if not isinstance(a, SparseMatrix):
return False
if self.shape != a.shape:
return False
if self.get_key() != a.get_key():
return False
d0 = self.diags('csr', False).data
a0 = a.diags('csr', False).data
if d0.shape[0] != a0.shape[0]:
return False
if not np.linalg.norm(d0-a0) < 1e-8:
return False
return True
def is_bc_matrix(self):
return self.trialfunction[0].boundary_condition() == 'Apply'
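# A minimal usage sketch of the class above, mirroring its docstring: build
# the mass and stiffness matrices of a Chebyshev Dirichlet space. The size
# N=16 and the family 'C' are arbitrary choices for illustration.
def _example_spectral_matrix():
    from shenfun import FunctionSpace, TestFunction, TrialFunction, inner, Dx
    SD = FunctionSpace(16, 'C', bc=(0, 0))
    u = TrialFunction(SD)
    v = TestFunction(SD)
    B = inner(v, u)             # mass matrix, a SpectralMatrix
    A = inner(v, Dx(u, 0, 2))   # stiffness matrix
    # Nonzero diagonals are stored in a dict; the keys are the offsets.
    return dict(B), dict(A)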
class Identity(SparseMatrix):
"""The identity matrix in :class:`.SparseMatrix` form
Parameters
----------
shape : 2-tuple of ints
The shape of the matrix
scale : number, optional
Scalar multiple of the matrix, defaults to unity
"""
def __init__(self, shape, scale=1):
SparseMatrix.__init__(self, {0: 1}, shape, scale)
self.measure = 1
def solve(self, b, u=None, axis=0, constraints=()):
if u is None:
u = b
else:
assert u.shape == b.shape
u[:] = b
u *= (1/self.scale)
return u
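# A minimal sketch of the Identity matrix: solve simply divides by the scale.
# The shape and scale below are arbitrary.
def _example_identity():
    import numpy as np
    I = Identity((8, 8), scale=2.0)
    b = np.arange(8, dtype=float)
    u = np.zeros_like(b)
    u = I.solve(b, u)           # u = b/2
    return u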
class ScipyMatrix(csr_matrix):
def __init__(self, mats):
assert isinstance(mats, (SparseMatrix, list))
self.bc_mats = []
if isinstance(mats, list):
bc_mats = extract_bc_matrices([mats])
mats = sum(mats[1:], mats[0])
self.bc_mats = bc_mats
csr_matrix.__init__(self, mats.diags('csr'))
def matvec(self, v, c, axis=0):
"""Matrix vector product
Returns c = dot(self, v)
Parameters
----------
v : array
Numpy input array of ndim>=1
c : array
Numpy output array of same shape as v
axis : int, optional
The axis over which to take the matrix vector product
"""
M = self.shape[1]
c.fill(0)
# Roll relevant axis to first
if axis > 0:
v = np.moveaxis(v, axis, 0)
c = np.moveaxis(c, axis, 0)
P = int(np.prod(v.shape[1:]))
y = self.dot(v[:M].reshape(M, P)).squeeze()
d = tuple([slice(0, m) for m in y.shape])
c[d] = y.reshape(c[d].shape)
if self.bc_mats:
w0 = np.zeros_like(c)
for bc_mat in self.bc_mats:
c += bc_mat.matvec(v, w0, axis=0)
if axis > 0:
c = np.moveaxis(c, 0, axis)
v = np.moveaxis(v, 0, axis)
return c
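# A small sketch of ScipyMatrix: wrap a tridiagonal SparseMatrix to get
# scipy's csr storage and its matvec. The diagonals below are arbitrary.
def _example_scipymatrix():
    import numpy as np
    S = SparseMatrix({0: 2*np.ones(8), 1: -np.ones(7), -1: -np.ones(7)}, (8, 8))
    Sc = ScipyMatrix(S)
    v = np.ones(8)
    c = np.zeros(8)
    c = Sc.matvec(v, c)         # same result as S.matvec(v, c)
    return c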
def BlockMatrices(tpmats):
"""Return two instances of the :class:`.BlockMatrix` class.
Parameters
----------
tpmats : sequence of :class:`.TPMatrix`'es or single :class:`.BlockMatrix`
There can be both boundary matrices from inhomogeneous Dirichlet
or Neumann conditions, as well as regular matrices.
Note
----
Use :class:`.BlockMatrix` directly if you do not have any inhomogeneous
boundary conditions.
"""
if isinstance(tpmats, BlockMatrix):
tpmats = tpmats.get_mats()
bc_mats = extract_bc_matrices([tpmats])
assert len(bc_mats) > 0, 'No boundary matrices - use BlockMatrix'
return BlockMatrix(tpmats), BlockMatrix(bc_mats)
class BlockMatrix:
r"""A class for block matrices
Parameters
----------
tpmats : sequence of :class:`.TPMatrix` or :class:`.SparseMatrix`
The individual blocks for the matrix
Note
----
The tensor product matrices may be either boundary
matrices, regular matrices, or a mixture of both.
Example
-------
Stokes equations, periodic in x and y-directions
.. math::
-\nabla^2 u - \nabla p &= 0 \\
\nabla \cdot u &= 0 \\
u(x, y, z=\pm 1) &= 0
We use for the z-direction a Dirichlet basis (SD) and a regular basis with
no boundary conditions (ST). This is combined with Fourier in the x- and
y-directions (K0, K1), such that we get two TensorProductSpaces (TD, TT)
that are tensor products of these bases
.. math::
TD &= K0 \otimes K1 \otimes SD \\
TT &= K0 \otimes K1 \otimes ST
We choose trialfunctions :math:`u \in [TD]^3` and :math:`p \in TT`, and then
solve the weak problem
.. math::
\left( \nabla v, \nabla u\right) + \left(\nabla \cdot v, p \right) = 0\\
\left( q, \nabla \cdot u\right) = 0
for all :math:`v \in [TD]^3` and :math:`q \in TT`.
To solve the problem we need to assemble a block matrix
.. math::
\begin{bmatrix}
\left( \nabla v, \nabla u\right) & \left(\nabla \cdot v, p \right) \\
\left( q, \nabla \cdot u\right) & 0
\end{bmatrix}
This matrix is assembled below
>>> from shenfun import *
>>> from mpi4py import MPI
>>> comm = MPI.COMM_WORLD
>>> N = (24, 24, 24)
>>> K0 = FunctionSpace(N[0], 'Fourier', dtype='d')
>>> K1 = FunctionSpace(N[1], 'Fourier', dtype='D')
>>> SD = FunctionSpace(N[2], 'Legendre', bc=(0, 0))
>>> ST = FunctionSpace(N[2], 'Legendre')
>>> TD = TensorProductSpace(comm, (K0, K1, SD), axes=(2, 1, 0))
>>> TT = TensorProductSpace(comm, (K0, K1, ST), axes=(2, 1, 0))
>>> VT = VectorSpace(TD)
>>> Q = CompositeSpace([VT, TD])
>>> up = TrialFunction(Q)
>>> vq = TestFunction(Q)
>>> u, p = up
>>> v, q = vq
>>> A00 = inner(grad(v), grad(u))
>>> A01 = inner(div(v), p)
>>> A10 = inner(q, div(u))
>>> M = BlockMatrix(A00+A01+A10)
"""
def __init__(self, tpmats):
assert isinstance(tpmats, (list, tuple))
if isinstance(tpmats[0], TPMatrix):
if len(tpmats[0].naxes) > 0:
tpmats = get_simplified_tpmatrices(tpmats)
tpmats = [tpmats] if not isinstance(tpmats[0], (list, tuple)) else tpmats
self.testbase = testbase = tpmats[0][0].testbase
self.trialbase = trialbase = tpmats[0][0].trialbase
self.dims = dims = (testbase.num_components(), trialbase.num_components())
self.mats = np.zeros(dims, dtype=int).tolist()
self._Ai = None
self.solver = None
self += tpmats
def __add__(self, a):
"""Return copy of self.__add__(a) <==> self+a"""
return BlockMatrix(self.get_mats()+a.get_mats())
def __iadd__(self, a):
"""self.__iadd__(a) <==> self += a
Parameters
----------
a : :class:`.BlockMatrix` or list of :class:`.TPMatrix` instances
"""
if isinstance(a, BlockMatrix):
tpmats = a.get_mats()
elif isinstance(a, (list, tuple)):
tpmats = a
for mat in tpmats:
if not isinstance(mat, list):
mat = [mat]
for m in mat:
assert isinstance(m, (TPMatrix, SparseMatrix))
i, j = m.global_index
m0 = self.mats[i][j]
if isinstance(m0, int):
self.mats[i][j] = [m]
else:
found = False
for n in m0:
if m == n:
n += m
found = True
continue
if not found:
self.mats[i][j].append(m)
def get_mats(self, return_first=False):
"""Return flattened list of matrices in self
Parameters
----------
return_first : bool, optional
Return just the first matrix in the loop if True
"""
tpmats = []
for mi in self.mats:
for mij in mi:
if isinstance(mij, (list, tuple)):
for m in mij:
if isinstance(m, (TPMatrix, SparseMatrix)):
if return_first:
return m
else:
tpmats.append(m)
return tpmats
def matvec(self, v, c, format=None, use_scipy=None):
"""Compute matrix vector product
.. math::
c = A v
where :math:`A` is the self block matrix and :math:`v,c` are flattened
instances of the class :class:`.Function`.
Parameters
----------
v : :class:`.Function`
c : :class:`.Function`
format : str, optional
The format of the matrices used for the matvec.
See `Scipy sparse matrices <https://docs.scipy.org/doc/scipy/reference/sparse.html>`_
use_scipy : boolean, optional
Whether to assemble and use scipy's bmat for the matvec, or to use
the matvec methods of this BlockMatrix's TPMatrices.
Using ``config['matrix']['block']['use_scipy']`` if use_scipy is None
Returns
-------
c : :class:`.Function`
"""
assert v.function_space() == self.trialbase
assert c.function_space() == self.testbase
nvars = c.function_space().num_components()
c = np.expand_dims(c, 0) if nvars == 1 else c
v = np.expand_dims(v, 0) if nvars == 1 else v
c.v.fill(0)
use_scipy = config['matrix']['block']['use_scipy'] if use_scipy is None else use_scipy
if self.contains_bc_matrix():
use_scipy = False
if use_scipy:
self.assemble(format)
daxes = self.testbase.get_diagonal_axes()
if len(daxes) == self.testbase.dimensions:
# Only Fourier
assert isinstance(self._Ai, spmatrix)
c.flatten()[:] = self._Ai * v.flatten()
else:
if len(daxes) > 0:
daxes += 1
sl1, dims1 = self.trialbase._get_ndiag_slices_and_dims()
sl2, dims2 = self.testbase._get_ndiag_slices_and_dims()
gi = np.zeros(dims1[-1], dtype=v.dtype)
go = np.zeros(dims2[-1], dtype=v.dtype)
for key, val in self._Ai.items():
key = np.atleast_1d(key)
if len(daxes) > 0:
sl1.T[daxes] = np.array(key)[:, None]
sl2.T[daxes] = np.array(key)[:, None]
gi = v.copy_to_flattened(gi, key, dims1, sl1)
go[:] = val * gi
c = c.copy_from_flattened(go, key, dims2, sl2)
else:
z = np.zeros_like(c.v[0])
for i, mi in enumerate(self.mats):
for j, mij in enumerate(mi):
if isinstance(mij, Number):
if abs(mij) > 1e-8:
c.v[i] += mij*v.v[j]
else:
for m in mij:
z.fill(0)
z = m.matvec(v.v[j], z, format=format)
c.v[i] += z
c = c.reshape(c.shape[1:]) if nvars == 1 else c
v = v.reshape(v.shape[1:]) if nvars == 1 else v
return c
def __getitem__(self, ij):
return self.mats[ij[0]][ij[1]]
def contains_bc_matrix(self):
for mi in self.mats:
for mij in mi:
if isinstance(mij, (list, tuple)):
for m in mij:
if m.is_bc_matrix() is True:
return True
return False
def contains_regular_matrix(self):
for mi in self.mats:
for mij in mi:
if isinstance(mij, (list, tuple)):
for m in mij:
if m.is_bc_matrix() is False:
return True
return False
def assemble(self, format=None):
"""Assemble matrices in scipy sparse format
Parameters
----------
format : str or None, optional
The format of the sparse scipy matrix. Using
``config['matrix']['block']['assemble']`` if None.
"""
if self._Ai is not None:
return
self._Ai = {}
N = self.testbase.forward.output_array.shape
dimensions = self.testbase.dimensions
daxes = self.get_diagonal_axes()
ndindices = [(0,)] if (len(daxes) == 0 or len(daxes) == dimensions) else np.ndindex(tuple(np.array(N)[daxes]))
format = config['matrix']['block']['assemble'] if format is None else format
for i in ndindices:
i = i[0] if len(i) == 1 else i
if format == 'csc':
Ai = self.diags(i, format='csr').tocsc() # because of bug in scipy
else:
Ai = self.diags(i, format=format)
self._Ai[i] = Ai
def get_diagonal_axes(self):
if self.testbase.dimensions == 1:
return np.array([])
tpmat = self.get_mats(True)
return np.setxor1d(tpmat.naxes, range(tpmat.dimensions)).astype(int)
def diags(self, it=None, format=None):
"""Return global block matrix in scipy sparse format
For multidimensional forms the returned matrix is constructed for
given indices in the periodic directions.
Parameters
----------
it : n-tuple of ints or None, optional
where n is dimensions-1. These are the indices into the diagonal
axes, or the axes with Fourier bases.
format : str or None, optional
The format of the returned matrix. See `Scipy sparse matrices <https://docs.scipy.org/doc/scipy/reference/sparse.html>`_
If None, then use default for :class:`.TPMatrix`.
"""
from .spectralbase import MixedFunctionSpace
if self.contains_bc_matrix() and self.contains_regular_matrix():
raise RuntimeError('diags only works for pure boundary or pure regular matrices. Consider splitting this BlockMatrix using :func:`.BlockMatrices`')
bm = []
for mi in self.mats:
bm.append([])
for mij in mi:
if isinstance(mij, Number):
bm[-1].append(None)
else:
m = mij[0]
if isinstance(self.testbase, MixedFunctionSpace) or len(m.naxes) == len(m.mats) or len(m.naxes) == 0:
d = m.diags(format)
for mj in mij[1:]:
d = d + mj.diags(format)
elif len(m.naxes) == 2: # 2 non-periodic directions
iit = np.where(np.array(m.scale.shape) == 1, 0, it) # if shape is 1 use index 0, else use given index (shape=1 means the scale is constant in that direction)
d = m.scale[tuple(iit)]*kron(m.mats[m.naxes[0]].diags(format=format), m.mats[m.naxes[1]].diags(format=format))
for mj in mij[1:]:
iit = np.where(np.array(mj.scale.shape) == 1, 0, it)
sc = mj.scale[tuple(iit)]
d = d + sc*kron(mj.mats[mj.naxes[0]].diags(format=format), mj.mats[mj.naxes[1]].diags(format=format))
else:
assert len(m.naxes) == 1
iit = np.zeros(m.dimensions, dtype=int)
diagonal_axes = self.get_diagonal_axes()
assert len(diagonal_axes) + len(m.naxes) == m.dimensions
iit[diagonal_axes] = it
ij = np.where(np.array(m.scale.shape) == 1, 0, iit) # if shape is 1 use index 0, else use given index (shape=1 means the scale is constant in that direction)
sc = m.scale[tuple(ij)]
d = sc*m.mats[m.naxes[0]].diags(format)
for mj in mij[1:]:
ij = np.where(np.array(mj.scale.shape) == 1, 0, iit)
sc = mj.scale[tuple(ij)]
d = d + sc*mj.mats[mj.naxes[0]].diags(format)
bm[-1].append(d)
return bmat(bm, format=format)
def solve(self, b, u=None, constraints=()):
r"""
Solve matrix system Au = b
where A is the current :class:`.BlockMatrix` (self)
Parameters
----------
b : array
Array of right hand side
u : array, optional
Output array
constraints : sequence of 3-tuples of (int, int, number)
Any 3-tuple describe a dof to be constrained. The first int
represents the block number of the function to be constrained. The
second int gives which degree of freedom to constrain and the number
gives the value it should obtain. For example, for the global
restriction that
.. math::
\frac{1}{V}\int p dx = number
where we have
.. math::
p = \sum_{k=0}^{N-1} \hat{p}_k \phi_k
it is sufficient to fix the first dof of p, \hat{p}_0, since
the bases are created such that all basis functions except the
first integrates to zero. So in this case the 3-tuple can be
(2, 0, 0) if p is found in block 2 of the mixed basis.
The constraint can only be applied to bases with no given
explicit boundary condition, like the pure Chebyshev or Legendre
bases.
"""
from .la import BlockMatrixSolver
sol = self.solver
if self.solver is None:
sol = BlockMatrixSolver(self)
self.solver = sol
u = sol(b, u, constraints)
return u
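# A sketch of how an assembled BlockMatrix is applied. It assumes M and Q are
# the block matrix and CompositeSpace built exactly as in the Stokes example
# of the class docstring; the coefficient Function is left unset (zeros) here
# purely for illustration.
def _example_blockmatrix_matvec(M, Q):
    from shenfun import Function
    up = Function(Q)            # trial-side coefficients
    fg = Function(Q)            # result
    fg = M.matvec(up, fg)       # fg = M*up
    # A solve with an undetermined pressure mode would additionally pass a
    # constraint tuple, as described in BlockMatrix.solve above.
    return fg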
class TPMatrix:
"""Tensor product matrix
A :class:`.TensorProductSpace` is the tensor product of ``D`` univariate
function spaces. A normal matrix (a second order tensor) is assembled from
bilinear forms (i.e., forms containing both test and trial functions) on
one univariate function space. A bilinear form on a tensor product space
will assemble to ``D`` outer products of such univariate matrices. That is,
for a two-dimensional tensor product you get fourth order tensors (outer
product of two matrices), and three-dimensional tensor product spaces leads
to a sixth order tensor (outer product of three matrices). This class
contains ``D`` second order matrices. The complete matrix is as such the
outer product of these ``D`` matrices.
Note that the outer product of two matrices often is called the Kronecker
product.
Parameters
----------
mats : sequence, or sequence of sequence of matrices
Instances of :class:`.SpectralMatrix` or :class:`.SparseMatrix`
The length of ``mats`` is the number of dimensions of the
:class:`.TensorProductSpace`
testspace : Function space
The test :class:`.TensorProductSpace`
trialspace : Function space
The trial :class:`.TensorProductSpace`
scale : array, optional
Scalar multiple of matrices. Must have ndim equal to the number of
dimensions in the :class:`.TensorProductSpace`, and the shape must be 1
along any directions with a nondiagonal matrix.
global_index : 2-tuple, optional
Indices (test, trial) into mixed space :class:`.CompositeSpace`.
testbase : :class:`.CompositeSpace`, optional
Instance of the base test space
trialbase : :class:`.CompositeSpace`, optional
Instance of the base trial space
"""
def __init__(self, mats, testspace, trialspace, scale=1.0, global_index=None,
testbase=None, trialbase=None):
assert isinstance(mats, (list, tuple))
assert len(mats) == len(testspace)
self.mats = mats
self.space = testspace
self.trialspace = trialspace
self.scale = scale
self.pmat = 1
self.naxes = testspace.get_nondiagonal_axes()
self.global_index = global_index
self.testbase = testbase
self.trialbase = trialbase
self._issimplified = False
def get_simplified(self):
diagonal_axes = np.setxor1d(self.naxes, range(self.space.dimensions)).astype(int)
if len(diagonal_axes) == 0 or self._issimplified:
return self
mats = []
scale = copy(self.scale)
for axis in range(self.dimensions):
mat = self.mats[axis]
if axis in diagonal_axes:
d = mat[0]
if np.ndim(d):
d = self.space[axis].broadcast_to_ndims(d*mat.scale)
scale = scale*d
mat = Identity(mat.shape)
mats.append(mat)
tpmat = TPMatrix(mats, self.space, self.trialspace, scale=scale,
global_index=self.global_index,
testbase=self.testbase, trialbase=self.trialbase)
# Decomposition
if len(self.space) > 1:
s = tpmat.scale.shape
ss = [slice(None)]*self.space.dimensions
ls = self.space.local_slice()
for axis, shape in enumerate(s):
if shape > 1:
ss[axis] = ls[axis]
tpmat.scale = (tpmat.scale[tuple(ss)]).copy()
# If only one non-diagonal matrix, then make a simple link to
# this matrix.
if len(tpmat.naxes) == 1:
tpmat.pmat = tpmat.mats[tpmat.naxes[0]]
elif len(tpmat.naxes) == 2: # 2 nondiagonal
tpmat.pmat = tpmat.mats
tpmat._issimplified = True
return tpmat
def simplify_diagonal_matrices(self):
if self._issimplified:
return
diagonal_axes = np.setxor1d(self.naxes, range(self.space.dimensions)).astype(int)
if len(diagonal_axes) == 0:
return
for axis in diagonal_axes:
mat = self.mats[axis]
if self.dimensions == 1: # Don't bother with the 1D case
continue
else:
d = mat[0] # get diagonal
if np.ndim(d):
d = self.space[axis].broadcast_to_ndims(d*mat.scale)
self.scale = self.scale*d
self.mats[axis] = Identity(mat.shape)
# Decomposition
if len(self.space) > 1:
s = self.scale.shape
ss = [slice(None)]*self.space.dimensions
ls = self.space.local_slice()
for axis, shape in enumerate(s):
if shape > 1:
ss[axis] = ls[axis]
self.scale = (self.scale[tuple(ss)]).copy()
# If only one non-diagonal matrix, then make a simple link to
# this matrix.
if len(self.naxes) == 1:
self.pmat = self.mats[self.naxes[0]]
elif len(self.naxes) == 2: # 2 nondiagonal
self.pmat = self.mats
self._issimplified = True
def solve(self, b, u=None, constraints=()):
if u is None:
u = b
else:
assert u.shape == b.shape
u[:] = b
tpmat = self.get_simplified()
if len(tpmat.naxes) == 0:
if np.all([isinstance(m, Identity) for m in tpmat.mats]) and isinstance(tpmat.scale, Number):
if abs(tpmat.scale-1) < 1e-8:
return u
sl = tuple([s.slice() for s in tpmat.trialspace.bases])
d = tpmat.scale
with np.errstate(divide='ignore'):
d = 1./tpmat.scale
if constraints:
assert constraints[0] == (0, 0)
# Constraint is enforced automatically
d = np.where(np.isfinite(d), d, 0)
u[sl] = b[sl] * d[sl]
elif len(tpmat.naxes) == 1:
from shenfun.la import SolverGeneric1ND
H = SolverGeneric1ND([tpmat])
u = H(b, u, constraints=constraints)
elif len(tpmat.naxes) == 2:
from shenfun.la import SolverGeneric2ND
H = SolverGeneric2ND([tpmat])
u = H(b, u, constraints=constraints)
return u
def matvec(self, v, c, format=None):
tpmat = self.get_simplified()
c.fill(0)
if len(tpmat.naxes) == 0:
c[:] = tpmat.scale*v
elif len(tpmat.naxes) == 1:
axis = tpmat.naxes[0]
rank = v.rank if hasattr(v, 'rank') else 0
if rank == 0:
c = tpmat.pmat.matvec(v, c, format=format, axis=axis)
else:
c = tpmat.pmat.matvec(v[tpmat.global_index[1]], c, format=format, axis=axis)
c[:] = c*tpmat.scale
elif len(tpmat.naxes) == 2:
# 2 non-periodic directions (may be non-aligned in second axis, hence transfers)
npaxes = deepcopy(list(tpmat.naxes))
space = tpmat.space
newspace = False
if space.forward.input_array.shape != space.forward.output_array.shape:
space = space.get_unplanned(True) # in case self.space is padded
newspace = True
pencilA = space.forward.output_pencil
subcomms = [s.Get_size() for s in pencilA.subcomm]
axis = pencilA.axis
assert subcomms[axis] == 1
npaxes.remove(axis)
second_axis = npaxes[0]
pencilB = pencilA.pencil(second_axis)
transAB = pencilA.transfer(pencilB, c.dtype.char)
cB = np.zeros(transAB.subshapeB, dtype=c.dtype)
cC = np.zeros(transAB.subshapeB, dtype=c.dtype)
bb = tpmat.mats[axis]
c = bb.matvec(v, c, format=format, axis=axis)
# align in second non-periodic axis
transAB.forward(c, cB)
bb = tpmat.mats[second_axis]
cC = bb.matvec(cB, cC, format=format, axis=second_axis)
transAB.backward(cC, c)
c *= tpmat.scale
if newspace:
space.destroy()
return c
def get_key(self):
naxis = self.space.get_nondiagonal_axes()
assert len(naxis) == 1
return self.mats[naxis[0]].get_key()
def isidentity(self):
return np.all([m.isidentity() for m in self.mats])
def isdiagonal(self):
return np.all([m.isdiagonal() for m in self.mats])
def is_bc_matrix(self):
for m in self.mats:
if m.is_bc_matrix():
return True
return False
@property
def dimensions(self):
"""Return dimension of TPMatrix"""
return len(self.mats)
def __mul__(self, a):
"""Returns copy of self.__mul__(a) <==> self*a"""
if isinstance(a, Number):
return TPMatrix(self.mats, self.space, self.trialspace, self.scale*a,
self.global_index, self.testbase, self.trialbase)
assert isinstance(a, np.ndarray)
c = np.empty_like(a)
c = self.matvec(a, c)
return c
def __rmul__(self, a):
"""Returns copy of self.__rmul__(a) <==> a*self"""
if isinstance(a, Number):
return self.__mul__(a)
else:
raise NotImplementedError
def __imul__(self, a):
"""Returns self.__imul__(a) <==> self*=a"""
if isinstance(a, Number):
self.scale *= a
elif isinstance(a, np.ndarray):
self.scale = self.scale*a
return self
def __div__(self, a):
"""Returns copy self.__div__(a) <==> self/a"""
if isinstance(a, Number):
return TPMatrix(self.mats, self.space, self.trialspace, self.scale/a,
self.global_index, self.testbase, self.trialbase)
elif isinstance(a, np.ndarray):
b = np.zeros_like(a)
b = self.solve(a, b)
return b
else:
raise NotImplementedError
def __neg__(self):
"""self.__neg__() <==> -self"""
A = self.copy()
A.scale = self.scale*-1
return A
def __eq__(self, a):
"""Check if matrices and global_index are the same.
Note
----
The attribute scale may still be different
"""
assert isinstance(a, TPMatrix)
if not self.global_index == a.global_index:
return False
for m0, m1 in zip(self.mats, a.mats):
if not m0.get_key() == m1.get_key():
return False
if not m0 == m1:
return False
return True
def __ne__(self, a):
return not self.__eq__(a)
def __add__(self, a):
"""Return copy of self.__add__(a) <==> self+a"""
assert isinstance(a, TPMatrix)
assert self == a
return TPMatrix(self.mats, self.space, self.trialspace, self.scale+a.scale,
self.global_index, self.testbase, self.trialbase)
def __iadd__(self, a):
"""self.__iadd__(a) <==> self += a"""
assert isinstance(a, TPMatrix)
assert self == a
self.scale = self.scale + a.scale
return self
def __sub__(self, a):
"""Return copy of self.__sub__(a) <==> self-a"""
assert isinstance(a, TPMatrix)
assert self == a
return TPMatrix(self.mats, self.space, self.trialspace, self.scale-a.scale,
self.global_index, self.testbase, self.trialbase)
def __isub__(self, a):
"""self.__isub__(a) <==> self -= a"""
assert isinstance(a, TPMatrix)
assert self == a
self.scale = self.scale - a.scale
return self
def copy(self):
"""Return TPMatrix deep copy of self"""
return self.__deepcopy__()
def __copy__(self):
mats = []
for mat in self.mats:
mats.append(mat.__copy__())
return TPMatrix(mats, self.space, self.trialspace, self.scale,
self.global_index, self.testbase, self.trialbase)
def __deepcopy__(self, memo=None, _nil=[]):
mats = []
for mat in self.mats:
mats.append(mat.__deepcopy__())
return TPMatrix(mats, self.space, self.trialspace, self.scale,
self.global_index, self.testbase, self.trialbase)
def diags(self, format=None):
assert self._issimplified is False
if self.dimensions == 2:
mat = kron(self.mats[0].diags(format=format),
self.mats[1].diags(format=format),
format=format)
elif self.dimensions == 3:
mat = kron(self.mats[0].diags(format=format),
kron(self.mats[1].diags(format=format),
self.mats[2].diags(format=format),
format=format),
format=format)
elif self.dimensions == 4:
mat = kron(self.mats[0].diags(format=format),
kron(self.mats[1].diags(format=format),
kron(self.mats[2].diags(format=format),
self.mats[3].diags(format=format),
format=format),
format=format),
format=format)
elif self.dimensions == 5:
mat = kron(self.mats[0].diags(format=format),
kron(self.mats[1].diags(format=format),
kron(self.mats[2].diags(format=format),
kron(self.mats[3].diags(format=format),
self.mats[4].diags(format=format),
format=format),
format=format),
format=format),
format=format)
return mat*np.atleast_1d(self.scale).item()
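# A sketch of TPMatrix in practice: assembling a 2D Poisson operator on a
# tensor product of two Dirichlet spaces returns a list of TPMatrix objects,
# each the outer (Kronecker) product of two 1D matrices. Sizes are arbitrary.
def _example_tpmatrix():
    from mpi4py import MPI
    from shenfun import (FunctionSpace, TensorProductSpace, TestFunction,
                         TrialFunction, inner, div, grad)
    D0 = FunctionSpace(8, 'C', bc=(0, 0))
    D1 = FunctionSpace(8, 'C', bc=(0, 0))
    T = TensorProductSpace(MPI.COMM_WORLD, (D0, D1))
    u = TrialFunction(T)
    v = TestFunction(T)
    A = inner(v, div(grad(u)))  # list of TPMatrix
    A0 = A[0].diags()           # first term as a scipy Kronecker product
    return A, A0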
def get_simplified_tpmatrices(tpmats):
"""Return copy of tpmats list, where diagonal matrices have been
simplified and placed in scale arrays.
Parameters
----------
tpmats
Instances of :class:`.TPMatrix`
Returns
-------
List[TPMatrix]
List of :class:`.TPMatrix`'es, that have been simplified
"""
A = []
for tpmat in tpmats:
A.append(tpmat.get_simplified())
# Add equal matrices
B = [A[0]]
for a in A[1:]:
found = False
for b in B:
if a == b:
b += a
found = True
if not found:
B.append(a)
return B
def check_sanity(A, test, trial, measure=1, assemble='quadrature', kind='vandermonde', fixed_resolution=None):
"""Sanity check for matrix.
Test that created matrix agrees with quadrature computed using a
memory-consuming Vandermonde implementation.
Parameters
----------
A : matrix
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.chebyshevu.bases`
- :mod:`.ultraspherical.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
measure : sympy function of coordinate, optional
Function in the physical coordinate. Gets mapped to
reference domain.
assemble : str, optional
Determines how to perform the integration we compare with
- 'exact'
- 'adaptive'
- 'quadrature'
kind : str, optional
The type of quadrature to compare with.
- 'vandermonde'
- 'stencil'
fixed_resolution : None or str, optional
A fixed number of quadrature points used to compute the matrix.
If 'fixed_resolution' is set, then assemble is set to 'quadrature' and
kind is set to 'vandermonde'.
"""
if fixed_resolution is not None:
kind = 'vandermonde'
if len(sp.sympify(measure).free_symbols) == 1:
if test[0].domain != test[0].reference_domain():
x0 = measure.free_symbols.pop()
xm = test[0].map_true_domain(x0)
measure = measure.replace(x0, xm)
Dsp = SpectralMatrix(test, trial, measure=measure, assemble=assemble, kind=kind, fixed_resolution=fixed_resolution)
for key, val in A.items():
assert np.allclose(val*A.scale, Dsp[key])
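# A short usage sketch for check_sanity: compare an implemented mass matrix
# against the dense Vandermonde quadrature computation. Raises AssertionError
# on mismatch. N=12 is arbitrary.
def _example_check_sanity():
    from shenfun import FunctionSpace
    SD = FunctionSpace(12, 'C', bc=(0, 0))
    A = SpectralMatrix((SD, 0), (SD, 0))
    check_sanity(A, (SD, 0), (SD, 0))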
def extract_diagonal_matrix(M, lowerband=None, upperband=None, abstol=1e-10, reltol=1e-10):
"""Return SparseMatrix version of dense matrix ``M``
Parameters
----------
M : Numpy array of ndim=2 or sparse scipy matrix
lowerband : int or None
Assumed lower bandwidth of M
upperband : int or None
Assumed upper bandwidth of M
abstol : float
Tolerance. Only diagonals with max(:math:`|d|`) < tol are
kept in the returned SparseMatrix, where :math:`d` is the
diagonal
reltol : float
Relative tolerance. Only diagonals with
max(:math:`|d|`)/max(:math:`|M|`) > reltol are kept in the
returned SparseMatrix
"""
d = {}
if isinstance(M, spmatrix):
M = M.tocsr()
relmax = abs(M).max()
dtype = float if M.dtype == 'O' else M.dtype # For mpf object
upperband = M.shape[1] if upperband is None else min(upperband+1, M.shape[1])
lowerband = M.shape[0]-1 if lowerband is None else min(lowerband, M.shape[0]-1)
for i in range(-lowerband, upperband):
u = M.diagonal(i).copy()
if abs(u).max() > abstol and abs(u).max()/relmax > reltol:
d[i] = np.array(u, dtype=dtype)
return SparseMatrix(d, M.shape)
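# A pure numpy sketch of extract_diagonal_matrix: a dense tridiagonal matrix
# is reduced to a SparseMatrix holding only the nonzero diagonals.
def _example_extract_diagonal_matrix():
    import numpy as np
    M = (np.diag(2*np.ones(6)) + np.diag(-np.ones(5), 1)
         + np.diag(-np.ones(5), -1))
    d = extract_diagonal_matrix(M)
    assert sorted(d.keys()) == [-1, 0, 1]
    return d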
def extract_bc_matrices(mats):
"""Extract boundary matrices from list of ``mats``
Parameters
----------
mats : list of list of instances of :class:`.TPMatrix` or
:class:`.SparseMatrix`
Returns
-------
list
list of boundary matrices.
Note
----
The ``mats`` list is modified in place since boundary matrices are
extracted.
"""
bc_mats = []
for a in mats:
for b in a.copy():
if b.is_bc_matrix():
bc_mats.append(b)
a.remove(b)
return bc_mats
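# A sketch of extract_bc_matrices, assuming the usual behaviour that an
# inhomogeneous boundary condition makes inner return both a regular and a
# boundary matrix. The bc values (1, 0) are arbitrary.
def _example_extract_bc_matrices():
    from shenfun import FunctionSpace, TestFunction, TrialFunction, inner
    SD = FunctionSpace(8, 'C', bc=(1, 0))
    u = TrialFunction(SD)
    v = TestFunction(SD)
    mats = inner(v, u)                     # list with regular + boundary matrix
    bc_mats = extract_bc_matrices([mats])  # mats is modified in place
    return mats, bc_mats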
def _get_matrix(test, trial, measure=1, assemble=None, fixed_resolution=None):
"""Return assembled matrix
This internal function is used by :class:`.SpectralMatrix`
Parameters
----------
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.chebyshevu.bases`
- :mod:`.ultraspherical.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
measure : Sympy expression of coordinate, or number, optional
Additional weight to integral. For example, in cylindrical
coordinates an additional measure is the radius `r`.
assemble : None or str, optional
Determines how to perform the integration
- 'quadrature' (default)
- 'exact'
- 'adaptive'
fixed_resolution : None or str, optional
A fixed number of quadrature points used to compute the inner product.
If 'fixed_resolution' is set, then assemble is set to 'quadrature'.
Note
----
The computed matrix is not compensated for a non-standard domain size.
This is because all pre-computed matrices use the reference domain, and
compensate for the domain size later. This function is a drop-in for all
pre-computed matrices, and thus needs to assume a standard reference domain.
To create a true matrix with this function, do not use it directly, but
wrapped in the SpectralMatrix class, like
>>> from shenfun import inner, TestFunction, TrialFunction, FunctionSpace
>>> L = FunctionSpace(4, 'L', domain=(-2, 2))
>>> u = TrialFunction(L)
>>> v = TestFunction(L)
>>> D = inner(u, v, assemble='exact')
>>> dict(D)
{0: array([4. , 1.33333333, 0.8 , 0.57142857])}
"""
K0 = test[0].slice().stop - test[0].slice().start
K1 = trial[0].slice().stop - trial[0].slice().start
if assemble == 'quadrature':
if fixed_resolution is not None:
test2 = test[0].get_refined(fixed_resolution)
N = test2.N
x = test2.points_and_weights(N, map_true_domain=False)[0]
ws = test2.get_measured_weights(N, measure, map_true_domain=False)
else:
N = test[0].N
x = test[0].points_and_weights(N, map_true_domain=False)[0]
ws = test[0].get_measured_weights(N, measure, map_true_domain=False)
u = trial[0].evaluate_basis_derivative_all(x=x, k=trial[1])[:, :K1]
if trial[0].boundary_condition() == 'Apply':
if np.linalg.norm(u) < 1e-14:
return {}
v = test[0].evaluate_basis_derivative_all(x=x, k=test[1])[:, :K0]
V = np.dot(np.conj(v.T)*ws[np.newaxis, :], u)
else: # exact or adaptive
x = sp.Symbol('x', real=True)
if not measure == 1:
if isinstance(measure, sp.Expr):
s = measure.free_symbols
assert len(s) == 1
x = s.pop()
xm = test[0].map_true_domain(x)
if test[0].family() == 'chebyshev':
measure = measure.subs(x, sp.cos(xm))
else:
measure = measure.subs(x, xm)
else:
assert isinstance(measure, Number)
# Exact integration is much more expensive than quadrature and
# as such we use quadrature first simply to get the sparsity pattern.
try:
R = _get_matrix(test, trial, measure=measure, assemble='quadrature')
except Exception:
R = {k: None for k in np.arange(-test[0].dim(), test[0].dim()+1)}
V = np.zeros((K0, K1), dtype=test[0].forward.output_array.dtype)
if test[0].family() == 'chebyshev':
# Transform integral using x=cos(theta)
assert test[1] == 0
S0 = test[0].stencil_matrix().diags('csr')
S1 = trial[0].stencil_matrix().diags('csr')
for i in range(test[0].slice().start, test[0].slice().stop):
M0 = S0.getrow(i)
pi = sp.S(0)
for ind, d in zip(M0.indices, M0.data):
pi += d*sp.cos(ind*x)
for jq in R.keys():
j = i+jq
if j < 0 or j >= K1:
continue
M1 = S1.getrow(j)
pj = sp.S(0)
for ind, d in zip(M1.indices, M1.data):
pj += d*sp.cos(ind*x)
# df(theta)/dx = df/dtheta*dtheta/dx - apply recursively
for _ in range(trial[1]):
pj = -pj.diff(x, 1)/sp.sin(x)
integrand = measure*pi*pj
if assemble == 'exact':
V[i, j] = sp.integrate(integrand, (x, 0, sp.pi))
elif assemble == 'adaptive':
if isinstance(integrand, Number):
V[i, j] = integrand*np.pi
else:
V[i, j] = quadrature(sp.lambdify(x, integrand), 0, np.pi, miniter=8, tol=1e-12, rtol=1e-12)[0]
else:
measure *= test[0].weight() # Weight of weighted space (in reference domain)
domain = test[0].reference_domain()
for i in range(test[0].slice().start, test[0].slice().stop):
pi = np.conj(test[0].basis_function(i, x=x))
for jq in R.keys():
j = i+jq
if j < 0 or j >= K1:
continue
pj = trial[0].basis_function(j, x=x)
integrand = measure*pi.diff(x, test[1])*pj.diff(x, trial[1])
if assemble == 'exact':
V[i, j] = integrate_sympy(integrand, (x, domain[0], domain[1]))
elif assemble == 'adaptive':
if isinstance(integrand, Number):
V[i, j] = integrand*float(domain[1]-domain[0])
else:
V[i, j] = quadrature(sp.lambdify(x, integrand), float(domain[0]), float(domain[1]), tol=1e-12, rtol=1e-12, miniter=8)[0]
if V.dtype.char in 'FDG':
ni = np.linalg.norm(V.imag)
if ni == 0:
V = V.real.copy()
elif np.linalg.norm(V.real) / ni > 1e14:
V = V.real.copy()
return extract_diagonal_matrix(V)
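# A sketch comparing the assembly methods handled above: the same Legendre
# mass matrix computed by quadrature and by exact symbolic integration. With
# an orthogonal basis both give the diagonal 2/(2k+1), scaled here by the
# mapping to the domain (-2, 2).
def _example_assembly_methods():
    from shenfun import FunctionSpace, TestFunction, TrialFunction, inner
    L = FunctionSpace(6, 'L', domain=(-2, 2))
    u = TrialFunction(L)
    v = TestFunction(L)
    B_quad = inner(v, u)                     # default: quadrature
    B_exact = inner(v, u, assemble='exact')
    return dict(B_quad), dict(B_exact)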
def assemble_stencil(test, trial, measure=1):
if trial[0].is_boundary_basis:
return _assemble_stencil_bc(test, trial, measure)
return _assemble_stencil(test, trial, measure)
def _assemble_stencil_bc(test, trial, measure=1):
from shenfun.spectralbase import inner_product
Tv = test[0].get_orthogonal(domain=(-1, 1))
Tu = trial[0].get_orthogonal(domain=(-1, 1))
B = inner_product((Tv, test[1]), (Tu, trial[1]))
if len(B) == 0:
return {}
K = test[0].stencil_matrix()
q = sp.degree(measure)
K.shape = (test[0].dim(), test[0].N)
S = extract_diagonal_matrix(trial[0].stencil_matrix().T).diags('csr')
if measure != 1:
from shenfun.jacobi.recursions import pmat, a
from shenfun.utilities import split
assert sp.sympify(measure).is_polynomial()
A = sp.S(0)
for dv in split(measure, expand=True):
alpha = test[0].alpha
beta = test[0].beta
gn = test[0].gn
sc = dv['coeff']
msi = dv['x']
qi = sp.degree(msi)
Ax = pmat(a, qi, alpha, beta, test[0].N, test[0].N, gn)
A = A + sc*Ax.diags('csr')
A = K.diags('csr') * A.T * B.diags('csr') * S
else:
A = K.diags('csr') * B.diags('csr') * S
M = B.shape[1]
K.shape = (test[0].N, test[0].N)
d = extract_diagonal_matrix(A, lowerband=M+q, upperband=M)
d = d._storage
return d
def _assemble_stencil(test, trial, measure=1):
from shenfun.spectralbase import inner_product
Tv = test[0].get_orthogonal(domain=(-1, 1))
Tu = trial[0].get_orthogonal(domain=(-1, 1))
alpha = test[0].alpha
beta = test[0].beta
gn = test[0].gn
# This needs to be either implemented or quadrature:
B = inner_product((Tv, test[1]), (Tu, trial[1]))
K = test[0].stencil_matrix()
q = sp.degree(measure)
K.shape = (test[0].dim(), test[0].N)
S = trial[0].stencil_matrix()
S.shape = (trial[0].dim(), trial[0].N)
if measure != 1:
from shenfun.jacobi.recursions import pmat, a
from shenfun.utilities import split
assert sp.sympify(measure).is_polynomial()
A = sp.S(0)
for dv in split(measure, expand=True):
sc = dv['coeff']
msi = dv['x']
qi = sp.degree(msi)
Ax = pmat(a, qi, alpha, beta, test[0].N, test[0].N, gn)
A = A + sc*Ax.diags('csr')
A = K.diags('csr') * A.T * B.diags('csr') * S.diags('csr').T
else:
A = K.diags('csr') * B.diags('csr') * S.diags('csr').T
K.shape = (test[0].N, test[0].N)
S.shape = (trial[0].N, trial[0].N)
if test[1]+trial[1] == 0 and test[0].family() == trial[0].family():
keysK = np.sort(np.array(list(K.keys())))
keysS = np.sort(np.array(list(S.keys())))
lb = -keysK[0]+keysS[-1]+q
ub = keysK[-1]-keysS[0]+q
d = extract_diagonal_matrix(A, lowerband=lb, upperband=ub)
else:
# compute the sparsity pattern
Ac = A.tocsc()
ub = Ac.getrow(0).indices
ub2 = Ac.getrow(1).indices
if len(ub) == 0 and len(ub2) == 0:
ub = 0
else:
ub = trial[0].dim() if len(ub) == 0 else ub.max()
ub2 = trial[0].dim() if len(ub2) == 0 else ub2.max()-1
ub = max(ub, ub2)
lb = Ac.getcol(0).indices
lb2 = Ac.getcol(1).indices
if len(lb) == 0 and len(lb2) == 0:
lb = 0
else:
lb = test[0].dim() if len(lb) == 0 else lb.max()
lb2 = test[0].dim() if len(lb2) == 0 else lb2.max()-1
lb = max(lb, lb2)
d = extract_diagonal_matrix(A, lowerband=lb, upperband=ub)
d = d._storage
return d
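# A generic, plain-scipy sketch of the stencil composition used above: with a
# test stencil K, an orthogonal (here made-up diagonal) matrix B and the
# trial stencil S = K, the matrix in the composite basis is A = K*B*S^T. The
# Dirichlet-type stencil phi_k = T_k - T_{k+2} gives a pentadiagonal A.
def _example_stencil_composition():
    import numpy as np
    from scipy.sparse import diags as spdiags
    N = 8                       # number of orthogonal modes
    M = N - 2                   # dimension of the stencil (Dirichlet) space
    K = spdiags([np.ones(M), -np.ones(M)], [0, 2], shape=(M, N), format='csr')
    B = spdiags([np.arange(1, N+1, dtype=float)], [0], shape=(N, N), format='csr')
    A = K @ B @ K.T             # banded matrix in the composite basis
    return A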
def assemble_phi(test, trial, measure=1):
assert test[0].short_name() in ('P1', 'P2', 'P3', 'P4')
if trial[0].is_boundary_basis:
return _assemble_phi_bc(test, trial, measure)
return _assemble_phi(test, trial, measure)
def _assemble_phi(test, trial, measure=1):
from shenfun.jacobi.recursions import Lmat
from shenfun.utilities import split
assert test[0].quad != 'GL'
alpha = test[0].alpha
beta = test[0].beta
gn = test[0].gn
q = sp.degree(measure)
k = (test[0].N-test[0].dim())//2
l = k-trial[1]
assert l >= 0
D = sp.S(0)
for dv in split(measure, expand=True):
sc = dv['coeff']
msi = dv['x']
qi = sp.degree(msi)
Ax = Lmat(k, qi, l, test[0].dim(), trial[0].N, alpha, beta, gn)
D = D + sc*Ax
if trial[0].is_orthogonal:
d = extract_diagonal_matrix(D, lowerband=q-k+l, upperband=q+k+l)
else:
K = trial[0].stencil_matrix()
K.shape = (trial[0].dim(), trial[0].N)
keys = np.sort(np.array(list(K.keys())))
lb, ub = -keys[0], keys[-1]
d = extract_diagonal_matrix(D*K.diags('csr').T, lowerband=q-k+l+ub, upperband=q+k+l+lb)
K.shape = (trial[0].N, trial[0].N)
d = d._storage
return d
def _assemble_phi_bc(test, trial, measure=1):
assert trial[0].short_name() == 'BG'
from shenfun.jacobi.recursions import Lmat
from shenfun.utilities import split
alpha = test[0].alpha
beta = test[0].beta
gn = test[0].gn
M = test[0].dim()
N = trial[0].dim_ortho
q = sp.degree(measure)
k = (test[0].N-test[0].dim())//2
l = k-trial[1]
assert l >= 0
D = sp.S(0)
if k <= N:
for dv in split(measure, expand=True):
sc = dv['coeff']
msi = dv['x']
qi = sp.degree(msi)
Ax = Lmat(k, qi, l, M, N, alpha, beta, gn)
D = D + sc*Ax
if D is sp.S.Zero:
scale = 0
d = {0: 1}
else:
K = trial[0].stencil_matrix()
d = extract_diagonal_matrix(D*extract_diagonal_matrix(K).diags('csr').T, lowerband=N+q, upperband=N)
d = d._storage
return d
def assemble_sympy(test, trial, measure=1, implicit=True, assemble='exact'):
"""Return sympy representation of mass matrix
Parameters
----------
test : :class:`.TestFunction` or 2-tuple of :class:`.SpectralBase`, int
If 2-tuple, then the integer represents the number of derivatives, which
should be zero for this function
trial : Like test but representing trial function
measure : Number or Sympy function
Function of coordinate
implicit : bool, optional
Whether to use unevaluated Sympy functions instead of the actual values
of the diagonals.
assemble : str, optional
- 'exact'
- 'quadrature'
Example
-------
>>> from shenfun import assemble_sympy, TrialFunction, TestFunction
>>> N = 8
>>> D = FunctionSpace(N, 'C', bc=(0, 0))
>>> v = TestFunction(D)
>>> u = TrialFunction(D)
>>> assemble_sympy(v, u)
(KroneckerDelta(i, j) - KroneckerDelta(i, j + 2))*h(i) - (KroneckerDelta(j, i + 2) - KroneckerDelta(i + 2, j + 2))*h(i + 2)
Note that when implicit is True, then h(i) represents the l2-norm,
or the L2-norm (if exact) of the orthogonal basis.
>>> D = FunctionSpace(N, 'C', bc={'left': {'N': 0}, 'right': {'N': 0}})
>>> u = TrialFunction(D)
>>> v = TestFunction(D)
>>> assemble_sympy(v, u, implicit=True)
(KroneckerDelta(i, j) + KroneckerDelta(i, j + 2)*s2(j))*h(i) + (KroneckerDelta(j, i + 2) + KroneckerDelta(i + 2, j + 2)*s2(j))*h(i + 2)*k2(i)
>>> assemble_sympy(v, u, implicit=False)
-i**2*(-j**2*KroneckerDelta(i + 2, j + 2)/(j + 2)**2 + KroneckerDelta(j, i + 2))*h(i + 2)/(i + 2)**2 + (-j**2*KroneckerDelta(i, j + 2)/(j + 2)**2 + KroneckerDelta(i, j))*h(i)
Here the implicit version uses 'k' for the diagonals of the test function,
and 's' for the trial function. The number represents the location of the
diagonal, so 's2' is the second upper diagonal of the stencil matrix of the
trial function.
You can get the diagonals like this:
>>> import sympy as sp
>>> i, j = sp.symbols('i,j', integer=True)
>>> M = assemble_sympy(v, u, implicit=False)
>>> M.subs(j, i) # main diagonal
pi*i**4/(2*(i + 2)**4) + pi/2
>>> M.subs(j, i+2) # second upper diagonal
-pi*i**2/(2*(i + 2)**2)
>>> M.subs(j, i-2) # second lower diagonal
-pi*(i - 2)**2/(2*i**2)
i is the row number, so the last one starts for i=2.
"""
if isinstance(test, tuple) and isinstance(trial, tuple):
assert len(test) == 2 and len(trial) == 2
assert test[1]+trial[1] == 0, 'Only implemented for mass matrix, because need B to be diagonal.'
test = test[0]
trial = trial[0]
else:
from shenfun.forms import TestFunction, TrialFunction
assert isinstance(test, TestFunction) and isinstance(trial, TrialFunction)
test = test.function_space()
trial = trial.function_space()
alpha = test.alpha
beta = test.beta
gn = test.gn
Tv = test.get_orthogonal(domain=(-1, 1))
i, j, k, l, m = sp.symbols('i,j,k,l,m', integer=True)
K = test.sympy_stencil(i, k, implicit='k' if implicit is True else False)
q = sp.degree(measure)
if measure != 1:
from shenfun.jacobi.recursions import a, matpow
from shenfun.utilities import split
assert sp.sympify(measure).is_polynomial()
A = sp.S(0)
for dv in split(measure, expand=True):
sc = dv['coeff']
msi = dv['x']
qi = sp.degree(msi)
A = A + sc*matpow(a, qi, alpha, beta, l, k, gn)
if assemble == 'exact':
B = Tv.sympy_L2_norm_sq(l)*sp.KroneckerDelta(l, m)
else:
B = Tv.sympy_l2_norm_sq(l)*sp.KroneckerDelta(l, m)
S = trial.sympy_stencil(j, m, implicit='s' if implicit is True else False)
A = K * A * B * S
M = sp.S(0)
nd = test.N-test.dim()
for kk in range(-nd, nd+1):
M1 = A.subs(k, i + kk)
for ll in range(-q, q+1):
M2 = M1.subs(l, i + kk + ll)
for mm in range(-nd, nd+1):
M3 = M2.subs(m, i + kk + ll + mm)
M += M3
else:
if assemble == 'exact':
B = Tv.sympy_L2_norm_sq(k)*sp.KroneckerDelta(k, l)
else:
B = Tv.sympy_l2_norm_sq(k)*sp.KroneckerDelta(k, l)
S = trial.sympy_stencil(j, l, implicit='s' if implicit is True else False)
A = K * B * S
M = sp.S(0)
nd = test.N-test.dim()
for kk in range(-nd, nd+1):
M1 = A.subs(k, i + kk)
for ll in range(-nd, nd+1):
M2 = M1.subs(l, i + kk + ll)
M += M2
return M
def sympy2SparseMatrix(sympymat, shape):
i, j = sp.symbols('i,j', integer=True)
d = {}
M, N = shape
numzerorow = 0
for k in range(N):
val = sp.simplify(sympymat.subs(j, i+k))
if val == 0:
numzerorow += 1
if numzerorow >=2:
break
continue
d[k] = np.array([val.subs(i, l) for l in np.arange(min(M, N-k))]).astype(float)
if k > 0:
d[-k] = d[k].copy()
numzerorow = 0
return SparseMatrix(d, shape)
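# A self-contained sketch of sympy2SparseMatrix: a symmetric banded operator
# written with Kronecker deltas in the row/column symbols i, j is converted
# to a SparseMatrix. The expression is ad hoc and only shows the mechanics.
def _example_sympy2sparse():
    i, j = sp.symbols('i,j', integer=True)
    expr = (2*sp.KroneckerDelta(i, j) + sp.KroneckerDelta(i, j + 1)
            + sp.KroneckerDelta(j, i + 1))
    B = sympy2SparseMatrix(expr, (6, 6))
    # B holds the main diagonal (2) and the first upper/lower diagonals (1)
    return B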
class SpectralMatDict(dict):
"""Dictionary for inner product matrices
Matrices are looked up with keys that are one of::
((test, k), (trial, l))
((test, k), (trial, l), measure)
where test and trial are classes subclassed from SpectralBase and k and l
are integers >= 0 that determines how many times the test or trial functions
should be differentiated. The measure is optional.
"""
def __missing__(self, key):
measure = 1 if len(key) == 2 else key[2]
c = functools.partial(SpectralMatrix, measure=measure)
self[key] = c
return c
def __getitem__(self, key):
if len(key) == 3:
matrix = functools.partial(dict.__getitem__(self, key),
measure=key[2])
else:
matrix = dict.__getitem__(self, key)
return matrix
|
b51d1881f5d59a143927cfd82c3102d84a3a2a2f
|
6436d1e6c23f9f43a8025889dc4414a3ad66acf2
|
/Assets/Python/Contrib/TechWindowWide.py
|
2b4be480ec7675b59951129630e590f19b30931e
|
[
"MIT"
] |
permissive
|
dguenms/Dawn-of-Civilization
|
b710195c4f46fe11d9229182c3b1e07b77f42637
|
a305e7846d085d6edf1e9c472e8dfceee1c07dd4
|
refs/heads/develop
| 2023-09-04T04:57:00.086384
| 2023-09-01T15:24:28
| 2023-09-01T15:24:28
| 45,362,597
| 116
| 121
|
MIT
| 2023-02-08T00:18:53
| 2015-11-01T23:52:28
|
C++
|
ISO-8859-1
|
Python
| false
| false
| 18,400
|
py
|
TechWindowWide.py
|
## TechWindowWide
##
## Originally by SirRethcir: tech display added
## Enhanced by Roamty, Caesium, Guarav
##
## Copyright (c) 2008 The BUG Mod.
from CvPythonExtensions import *
import CvUtil
# BUG - Options - start
import BugCore
TechWindowOpt = BugCore.game.TechWindow
# BUG - Options - end
localText = CyTranslator()
gc = CyGlobalContext()
class CvTechSplashScreen:
"Splash screen for techs"
def __init__(self, iScreenID):
self.nScreenId = iScreenID
self.iTech = -1
self.nWidgetCount = 0
# widget names
self.WIDGET_ID = "TechSplashScreenWidget"
self.SCREEN_NAME = "TechSplashScreen"
self.EXIT_ID = "TechSplashExit"
self.X_SCREEN = 17 # 117 # Spocko 205
self.Y_SCREEN = 27
self.W_SCREEN = 1024
self.H_SCREEN = 768
self.Z_BACKGROUND = -1.1
self.Z_CONTROLS = self.Z_BACKGROUND - 0.2
self.DZ = -0.2
self.Z_HELP_AREA = self.Z_CONTROLS - 2
self.W_HELP_AREA = 200
# Panels
self.iMarginSpace = 15
self.X_MAIN_PANEL = 17 # 117 # Spocko 205
self.Y_MAIN_PANEL = 25
self.W_MAIN_PANEL = 996 #796 # Spocko 620
self.H_MAIN_PANEL = 725 # 545
# Upper Panel
self.X_UPPER_PANEL = self.X_MAIN_PANEL + self.iMarginSpace
self.Y_UPPER_PANEL = self.Y_MAIN_PANEL + self.iMarginSpace
self.W_UPPER_PANEL = self.W_MAIN_PANEL - (self.iMarginSpace * 2)
self.H_UPPER_PANEL = 320
self.X_TITLE = self.X_MAIN_PANEL + (self.W_MAIN_PANEL / 2)
self.Y_TITLE = self.Y_UPPER_PANEL + 12
self.W_ICON = 96 # 90
self.H_ICON = 96 # 90
# self.X_ICON = self.X_UPPER_PANEL + 134 # Spocko 56 # 23 #42
# self.Y_ICON = self.Y_UPPER_PANEL + (self.H_UPPER_PANEL / 2) - (self.H_ICON / 2) + 17
self.X_ICON_PANEL = self.X_UPPER_PANEL + self.iMarginSpace + 2
self.Y_ICON_PANEL = self.Y_UPPER_PANEL + self.iMarginSpace + 33
self.W_ICON_PANEL = 160 # 140 # 200 per Roamty # Spocko was 140
self.H_ICON_PANEL = 135 # self.H_MAIN_PANEL - (self.iMarginSpace * 2)
self.X_ICON = self.X_ICON_PANEL + self.W_ICON_PANEL / 2 - self.W_ICON / 2
self.Y_ICON = self.Y_ICON_PANEL + self.H_ICON_PANEL / 2 - self.H_ICON / 2
self.X_QUOTE = self.X_UPPER_PANEL + self.W_ICON_PANEL + (self.iMarginSpace * 2)
# self.Y_QUOTE = self.Y_UPPER_PANEL + self.iMarginSpace + 36
self.Y_QUOTE = self.Y_ICON
self.W_QUOTE = 725 # Spocko was 400
# self.H_QUOTE = self.H_UPPER_PANEL - (self.iMarginSpace * 2) - 38
self.H_QUOTE = self.H_UPPER_PANEL - (self.Y_QUOTE - self.Y_UPPER_PANEL) - (self.iMarginSpace * 2)
#---Changed START - see the original file -----------------
# Lower Panel
self.X_LOWER_PANEL = self.X_MAIN_PANEL + self.iMarginSpace
self.Y_LOWER_PANEL = self.Y_UPPER_PANEL + self.H_UPPER_PANEL
self.W_LOWER_PANEL = self.W_MAIN_PANEL - (self.iMarginSpace * 2)
self.H_LOWER_PANEL = 360
self.H_ALLOWS_PANEL = 80
self.H_ALLOWS_SPACE = 28
self.X_SPECIAL_PANEL = self.X_LOWER_PANEL + self.iMarginSpace
self.Y_SPECIAL_PANEL = self.Y_LOWER_PANEL + self.iMarginSpace + 20
self.W_SPECIAL_PANEL = self.W_LOWER_PANEL/2 - self.iMarginSpace
self.H_SPECIAL_PANEL = 2 * self.H_ALLOWS_PANEL + self.H_ALLOWS_SPACE
self.X_ALLOWS_PANELSIR = self.X_LOWER_PANEL + self.iMarginSpace
self.Y_ALLOWS_PANELSIR = self.Y_SPECIAL_PANEL + self.H_SPECIAL_PANEL + self.H_ALLOWS_SPACE
self.W_ALLOWS_PANELSIR = self.W_LOWER_PANEL/2 - (self.iMarginSpace)
self.H_ALLOWS_PANELSIR = 80
self.X_ALLOWS_PANEL = self.X_LOWER_PANEL + self.iMarginSpace + self.W_SPECIAL_PANEL
self.Y_ALLOWS_PANEL = self.Y_SPECIAL_PANEL
self.W_ALLOWS_PANEL = self.W_LOWER_PANEL/2 - (self.iMarginSpace)
self.Y_ALLOWS_PANEL2 = self.Y_SPECIAL_PANEL + self.H_ALLOWS_PANEL + self.H_ALLOWS_SPACE
self.Y_ALLOWS_PANEL3 = self.Y_SPECIAL_PANEL + 2 * (self.H_ALLOWS_PANEL + self.H_ALLOWS_SPACE)
#---Changed END ------------------------------------------
# Contents
self.X_EXIT = self.X_MAIN_PANEL + (self.W_MAIN_PANEL / 2) - 55
self.Y_EXIT = self.Y_MAIN_PANEL + self.H_MAIN_PANEL - 45
self.W_EXIT = 120
self.H_EXIT = 30
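# Layout summary (for clarity; values taken from the constants above): the
# main panel is 996x725 at (17, 25). The upper panel shows the 96x96 tech
# icon, centred in its 160x135 glow panel, next to the quote text. The lower
# panel is split into the "special abilities" text on the left, the
# "leads to" button row below it, and three button rows on the right for
# units, buildings and improvements/other unlocks.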
def interfaceScreen(self, iTech):
self.EXIT_TEXT = localText.getText("TXT_KEY_SCREEN_CONTINUE", ())
self.nTechs = CyGlobalContext().getNumTechInfos()
self.iTech = iTech
self.nWidgetCount = 0
# Create screen
screen = self.getScreen()
techInfo = CyGlobalContext().getTechInfo(self.iTech)
screen.setSound(techInfo.getSound())
screen.showScreen(PopupStates.POPUPSTATE_IMMEDIATE, False)
screen.enableWorldSounds( false )
# screen.setHelpTextArea(self.W_HELP_AREA, FontTypes.SMALL_FONT, self.X_UNIT_PANE, self.Y_UNIT_PANE, self.Z_HELP_AREA, 1, CyArtFileMgr().getInterfaceArtInfo("POPUPS_BACKGROUND_TRANSPARENT").getPath(), True, True, CvUtil.FONT_LEFT_JUSTIFY, 0 )
# screen.addDDSGFC( "TechSplashBackground", CyArtFileMgr().getInterfaceArtInfo("TEMP_BG").getPath(), 0, 0, self.W_SCREEN, self.H_SCREEN, WidgetTypes.WIDGET_GENERAL, -1, -1 )
screen.showWindowBackground( False )
screen.setDimensions(screen.centerX(0), screen.centerY(0), self.W_SCREEN, self.H_SCREEN)
# Create panels
# Main Panel
szMainPanel = "TechSplashMainPanel"
screen.addPanel( szMainPanel, "", "", true, true,
self.X_MAIN_PANEL, self.Y_MAIN_PANEL, self.W_MAIN_PANEL, self.H_MAIN_PANEL, PanelStyles.PANEL_STYLE_MAIN )
# Top Panel
szHeaderPanel = "TechSplashHeaderPanel"
screen.addPanel( szHeaderPanel, "", "", true, true,
self.X_UPPER_PANEL, self.Y_UPPER_PANEL, self.W_UPPER_PANEL, self.H_UPPER_PANEL, PanelStyles.PANEL_STYLE_DAWNBOTTOM )
screen.setStyle(szHeaderPanel, "Panel_DawnBottom_Style")
# Icon Panel
szIconPanel = "IconPanel"
screen.addPanel( szIconPanel, "", "", true, true,
self.X_ICON_PANEL, self.Y_ICON_PANEL, self.W_UPPER_PANEL-(self.iMarginSpace * 2), self.H_UPPER_PANEL-(self.iMarginSpace * 4), PanelStyles.PANEL_STYLE_MAIN_TAN15 )
screen.setStyle(szIconPanel, "Panel_TechDiscover_Style")
# Icon Panel
szIconPanel = "IconPanelGlow"
screen.addPanel( szIconPanel, "", "", true, true,
self.X_ICON_PANEL, self.Y_ICON_PANEL, self.W_ICON_PANEL, self.H_ICON_PANEL, PanelStyles.PANEL_STYLE_MAIN_TAN15 )
screen.setStyle(szIconPanel, "Panel_TechDiscoverGlow_Style")
# Bottom Panel
szTextPanel = "TechSplashTextPanel"
screen.addPanel( szTextPanel, "", "", true, true,
self.X_LOWER_PANEL+self.iMarginSpace, self.Y_LOWER_PANEL, self.W_LOWER_PANEL-(self.iMarginSpace * 2), self.H_LOWER_PANEL, PanelStyles.PANEL_STYLE_MAIN )
screen.setStyle(szTextPanel, "Panel_TanT_Style")
# Exit Button
screen.setButtonGFC("Exit", localText.getText("TXT_KEY_SCREEN_CONTINUE", ()), "", self.X_EXIT, self.Y_EXIT, self.W_EXIT , self.H_EXIT, WidgetTypes.WIDGET_CLOSE_SCREEN, -1, -1, ButtonStyles.BUTTON_STYLE_STANDARD )
# Special Panel
szSpecialPanel = "TechSplashSpecialPanel"
screen.addPanel( szSpecialPanel, "", "", true, true,
self.X_SPECIAL_PANEL+self.iMarginSpace, self.Y_SPECIAL_PANEL, self.W_SPECIAL_PANEL-(self.iMarginSpace * 2), self.H_SPECIAL_PANEL, PanelStyles.PANEL_STYLE_IN )
screen.setStyle(szSpecialPanel, "Panel_Black25_Style")
#---Inserted START - can be removed entirely-----------------
# Allows PanelSIR
panelNameSIR = "SIR"
screen.addPanel( panelNameSIR, "", "", false, true,
self.X_ALLOWS_PANELSIR+self.iMarginSpace, self.Y_ALLOWS_PANELSIR, self.W_ALLOWS_PANELSIR-(self.iMarginSpace * 2), self.H_ALLOWS_PANELSIR, PanelStyles.PANEL_STYLE_IN )
screen.setStyle(panelNameSIR, "Panel_Black25_Style")
#---Inserted END -------------------------------------------------
# Allows Panel
panelName = self.getNextWidgetName()
screen.addPanel( panelName, "", "", false, true,
self.X_ALLOWS_PANEL+self.iMarginSpace, self.Y_ALLOWS_PANEL, self.W_ALLOWS_PANEL-(self.iMarginSpace * 2), self.H_ALLOWS_PANEL, PanelStyles.PANEL_STYLE_IN )
screen.setStyle(panelName, "Panel_Black25_Style")
#---Inserted START - can be removed entirely-----------------
# Allows Panel2
panelName2 = "SIR2"
screen.addPanel( panelName2, "", "", false, true,
self.X_ALLOWS_PANEL+self.iMarginSpace, self.Y_ALLOWS_PANEL2, self.W_ALLOWS_PANEL-(self.iMarginSpace * 2), self.H_ALLOWS_PANEL, PanelStyles.PANEL_STYLE_IN )
screen.setStyle(panelName2, "Panel_Black25_Style")
# Allows Panel3
panelName3 = "SIR3"
screen.addPanel( panelName3, "", "", false, true,
self.X_ALLOWS_PANEL+self.iMarginSpace, self.Y_ALLOWS_PANEL3, self.W_ALLOWS_PANEL-(self.iMarginSpace * 2), self.H_ALLOWS_PANEL, PanelStyles.PANEL_STYLE_IN )
screen.setStyle(panelName3, "Panel_Black25_Style")
#---Inserted END -------------------------------------------------
# Add Contents
# Title
szTech = techInfo.getDescription()
screen.setLabel(self.getNextWidgetName(), "Background", u"<font=4>" + szTech.upper() + u"</font>", CvUtil.FONT_CENTER_JUSTIFY,
self.X_TITLE, self.Y_TITLE, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )
# Tech Icon
screen.addDDSGFC(self.getNextWidgetName(), techInfo.getButton(), self.X_ICON, self.Y_ICON, self.W_ICON, self.H_ICON, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, self.iTech, -1 )
# Tech Quote
szTechQuote = techInfo.getQuote()
iTextOffset = 0
if TechWindowOpt.isShowCivilopediaText():
szTechQuote += "\n\n" + techInfo.getCivilopedia()
else:
iTextOffset = 20
screen.addMultilineText( "Text", szTechQuote,
self.X_QUOTE, self.Y_QUOTE + iTextOffset, self.W_QUOTE, self.H_QUOTE - iTextOffset, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
# Special
szSpecialTitle = u"<font=3b>" + localText.getText("TXT_KEY_PEDIA_SPECIAL_ABILITIES", ()) + u"</font>"
szSpecialTitleWidget = "SpecialTitle"
screen.setText(szSpecialTitleWidget, "", szSpecialTitle, CvUtil.FONT_LEFT_JUSTIFY,
self.X_SPECIAL_PANEL+self.iMarginSpace, self.Y_SPECIAL_PANEL - 20, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
listName = self.getNextWidgetName()
szSpecialText = CyGameTextMgr().getTechHelp(self.iTech, True, False, False, True, -1)[1:]
screen.addMultilineText(listName, szSpecialText, self.X_SPECIAL_PANEL+10, self.Y_SPECIAL_PANEL+5, self.W_SPECIAL_PANEL-20, self.H_SPECIAL_PANEL-20, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
#---Inserted START - can be removed entirely --------------
# Allows -> LeadsTo
szAllowsTitleDescSIR = u"<font=3b>" + localText.getText("TXT_KEY_PEDIA_LEADS_TO", ()) + ":" + u"</font>"
szAllowsTitleWidgetSIR = "AllowsTitleSIR"
screen.setText(szAllowsTitleWidgetSIR, "", szAllowsTitleDescSIR, CvUtil.FONT_LEFT_JUSTIFY,
self.X_ALLOWS_PANELSIR+self.iMarginSpace, self.Y_ALLOWS_PANELSIR - 20, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
for j in range(CyGlobalContext().getNumTechInfos()):
for k in range(CyGlobalContext().getDefineINT("NUM_OR_TECH_PREREQS")):
iPrereq = CyGlobalContext().getTechInfo(j).getPrereqOrTechs(k)
if (iPrereq == self.iTech):
screen.attachImageButton( panelNameSIR, "", CyGlobalContext().getTechInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_DERIVED_TECH, j, self.iTech, False )
for k in range(CyGlobalContext().getDefineINT("NUM_AND_TECH_PREREQS")):
iPrereq = CyGlobalContext().getTechInfo(j).getPrereqAndTechs(k)
if (iPrereq == self.iTech):
screen.attachImageButton( panelNameSIR, "", CyGlobalContext().getTechInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_DERIVED_TECH, j, self.iTech, False )
#---Inserted END ------------------------------------------------
# Allows
# szAllowsTitleDesc = u"<font=3b>" + localText.getText("TXT_KEY_PEDIA_ALLOWS", ()) + ":" + u"</font>"
# szAllowsTitleWidget = "AllowsTitle"
# screen.setText(szAllowsTitleWidget, "", szAllowsTitleDesc, CvUtil.FONT_LEFT_JUSTIFY,
# self.X_ALLOWS_PANEL+self.iMarginSpace, self.Y_ALLOWS_PANEL - 20, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
# Units Enabled
szUnitsTitleDesc = u"<font=3b>" + localText.getText("TXT_KEY_PEDIA_UNITS_ENABLED", ()) + ":" + u"</font>"
szUnitsTitleWidget = "UnitsTitle"
screen.setText(szUnitsTitleWidget, "", szUnitsTitleDesc, CvUtil.FONT_LEFT_JUSTIFY,
self.X_ALLOWS_PANEL + self.iMarginSpace, self.Y_ALLOWS_PANEL - 20, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
for j in range( CyGlobalContext().getNumUnitClassInfos() ):
eLoopUnit = CyGlobalContext().getCivilizationInfo(CyGlobalContext().getGame().getActiveCivilizationType()).getCivilizationUnits(j)
if (eLoopUnit != -1):
if (isTechRequiredForUnit(self.iTech, eLoopUnit)):
screen.attachImageButton( panelName, "", CyGlobalContext().getUnitInfo(eLoopUnit).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM,
WidgetTypes.WIDGET_PEDIA_JUMP_TO_UNIT, eLoopUnit, 1, False )
# Buildings Enabled
szBuildingsTitleDesc = u"<font=3b>" + localText.getText("TXT_KEY_PEDIA_BUILDINGS_ENABLED", ()) + ":" + u"</font>"
szBuildingsTitleWidget = "BuildingsTitle"
screen.setText(szBuildingsTitleWidget, "", szBuildingsTitleDesc, CvUtil.FONT_LEFT_JUSTIFY,
self.X_ALLOWS_PANEL + self.iMarginSpace, self.Y_ALLOWS_PANEL2 - 20, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
for j in range(CyGlobalContext().getNumBuildingClassInfos()):
bTechFound = 0
eLoopBuilding = CyGlobalContext().getCivilizationInfo(CyGlobalContext().getGame().getActiveCivilizationType()).getCivilizationBuildings(j)
if (eLoopBuilding != -1):
if (isTechRequiredForBuilding(self.iTech, eLoopBuilding)):
screen.attachImageButton( panelName2, "", CyGlobalContext().getBuildingInfo(eLoopBuilding).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM,
WidgetTypes.WIDGET_PEDIA_JUMP_TO_BUILDING, eLoopBuilding, 1, False )
# Improvements
szImprovesTitleDesc = u"<font=3b>" + localText.getText("TXT_KEY_PEDIA_CATEGORY_IMPROVEMENT", ()) + ":" + u"</font>"
szImprovesTitleWidget = "ImprovesTitle"
screen.setText(szImprovesTitleWidget, "", szImprovesTitleDesc, CvUtil.FONT_LEFT_JUSTIFY,
self.X_ALLOWS_PANEL + self.iMarginSpace, self.Y_ALLOWS_PANEL3 - 20, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
for j in range(CyGlobalContext().getNumProjectInfos()):
bTechFound = 0
if (isTechRequiredForProject(self.iTech, j)):
screen.attachImageButton( panelName3, "", CyGlobalContext().getProjectInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM,
WidgetTypes.WIDGET_PEDIA_JUMP_TO_PROJECT, j, 1, False )
for j in range(CyGlobalContext().getNumPromotionInfos()):
if (CyGlobalContext().getPromotionInfo(j).getTechPrereq() == self.iTech):
screen.attachImageButton( panelName3, "", CyGlobalContext().getPromotionInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM,
WidgetTypes.WIDGET_PEDIA_JUMP_TO_PROMOTION, j, 1, False )
		#---Inserted START - can be deleted entirely --------------
# Improvements
for j in range(CyGlobalContext().getNumBuildInfos()):
if gc.getBuildInfo(j).isGraphicalOnly(): continue
bTechFound = 0;
if (CyGlobalContext().getBuildInfo(j).getTechPrereq() == -1):
bTechFound = 0
for k in range(CyGlobalContext().getNumFeatureInfos()):
if (CyGlobalContext().getBuildInfo(j).getFeatureTech(k) == self.iTech):
bTechFound = 1
else:
if (CyGlobalContext().getBuildInfo(j).getTechPrereq() == self.iTech):
bTechFound = 1
if (bTechFound == 1):
if (CyGlobalContext().getBuildInfo(j).getImprovement() == -1):
screen.attachImageButton( panelName3, "", CyGlobalContext().getBuildInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_HELP_IMPROVEMENT, j, 1, False )
else:
screen.attachImageButton( panelName3, "", CyGlobalContext().getBuildInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_IMPROVEMENT, CyGlobalContext().getBuildInfo(j).getImprovement(), 1, False )
# Bonuses
for j in range(CyGlobalContext().getNumBonusInfos()):
if (CyGlobalContext().getBonusInfo(j).getTechReveal() == self.iTech):
screen.attachImageButton( panelName3, "", CyGlobalContext().getBonusInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM,
WidgetTypes.WIDGET_PEDIA_JUMP_TO_BONUS, j, 1, False )
# Civic
for j in range(CyGlobalContext().getNumCivicInfos()):
if (CyGlobalContext().getCivicInfo(j).getTechPrereq() == self.iTech):
screen.attachImageButton( panelName3, "", CyGlobalContext().getCivicInfo(j).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM,
WidgetTypes.WIDGET_PEDIA_JUMP_TO_CIVIC, j, 1, False )
		#---Inserted END ------------------------------------------------
# returns a unique ID for a widget in this screen
def getNextWidgetName(self):
szName = self.WIDGET_ID + str(self.nWidgetCount * self.nTechs + self.iTech)
self.nWidgetCount += 1
return szName
# returns a unique ID for this screen
def getScreen(self):
screen = CyGInterfaceScreen(self.SCREEN_NAME + str(self.iTech), self.nScreenId)
return screen
def handleInput( self, inputClass ):
if ( inputClass.getData() == int(InputTypes.KB_RETURN) ):
self.getScreen().hideScreen()
return 1
if (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED):
if (inputClass.getFunctionName() == self.EXIT_ID):
self.getScreen().hideScreen()
return 1
return 0
def update(self, fDelta):
return
|
a360ad1d542de9abbd79b399ac041dad4aea7beb
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayCommerceWaterUsertaskdetailBatchqueryResponse.py
|
992a8812f1f925b8485d1d3c53a62fd6c2981b10
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,947
|
py
|
AlipayCommerceWaterUsertaskdetailBatchqueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.UserTaskDetailResponse import UserTaskDetailResponse
class AlipayCommerceWaterUsertaskdetailBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceWaterUsertaskdetailBatchqueryResponse, self).__init__()
self._data_list = None
self._page_no = None
self._page_size = None
self._total_size = None
@property
def data_list(self):
return self._data_list
@data_list.setter
def data_list(self, value):
if isinstance(value, list):
self._data_list = list()
for i in value:
if isinstance(i, UserTaskDetailResponse):
self._data_list.append(i)
else:
self._data_list.append(UserTaskDetailResponse.from_alipay_dict(i))
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total_size(self):
return self._total_size
@total_size.setter
def total_size(self, value):
self._total_size = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceWaterUsertaskdetailBatchqueryResponse, self).parse_response_content(response_content)
if 'data_list' in response:
self.data_list = response['data_list']
if 'page_no' in response:
self.page_no = response['page_no']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total_size' in response:
self.total_size = response['total_size']
|
81de5a860093be3855178f8e5f924d30349323a8
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/HLTanalyzers/test/harvesting.py
|
6860d433a0620dd8dd5ea2d08b5fcda9f06e325b
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,694
|
py
|
harvesting.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process('HARVESTING')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.EDMtoMEAtRunEnd_cff')
process.load('Configuration.StandardSequences.Harvesting_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:./MyFirstDQMExample.root'),
processingMode = cms.untracked.string('RunsAndLumis')
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('FULLMERGE')
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.2 $'),
annotation = cms.untracked.string('test_11_b_1 nevts:1'),
name = cms.untracked.string('PyReleaseValidation')
)
# Output definition
# Additional output definition
# Other statements
process.GlobalTag.globaltag = 'GR_R_44_V7::All'
# Path and EndPath definitions
process.edmtome_step = cms.Path(process.EDMtoME)
process.dqmsave_step = cms.Path(process.DQMSaver)
# Schedule definition
process.schedule = cms.Schedule(process.edmtome_step,process.dqmsave_step)
|
4304caf1d3e3108de4ee8e2e43bb8a9027b3befe
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-framework-Cocoa-2.5.1/PyObjCTest/test_nspointerfunctions.py
|
e9c883f49dfb7f13e115888ca81a577e44b9550d
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
test_nspointerfunctions.py
|
from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSPointerFunctions (TestCase):
def testConstants(self):
self.assertEqual(NSPointerFunctionsStrongMemory, (0 << 0))
self.assertEqual(NSPointerFunctionsZeroingWeakMemory, (1 << 0))
self.assertEqual(NSPointerFunctionsOpaqueMemory, (2 << 0))
self.assertEqual(NSPointerFunctionsMallocMemory, (3 << 0))
self.assertEqual(NSPointerFunctionsMachVirtualMemory, (4 << 0))
self.assertEqual(NSPointerFunctionsObjectPersonality, (0 << 8))
self.assertEqual(NSPointerFunctionsOpaquePersonality, (1 << 8))
self.assertEqual(NSPointerFunctionsObjectPointerPersonality, (2 << 8))
self.assertEqual(NSPointerFunctionsCStringPersonality, (3 << 8))
self.assertEqual(NSPointerFunctionsStructPersonality, (4 << 8))
self.assertEqual(NSPointerFunctionsIntegerPersonality, (5 << 8))
self.assertEqual(NSPointerFunctionsCopyIn, (1 << 16))
@min_os_level('10.8')
def testConstants10_8(self):
self.assertEqual(NSPointerFunctionsWeakMemory, 5<<0)
def testPropType(self):
o = NSPointerFunctions.alloc().initWithOptions_(0)
v = o.usesStrongWriteBarrier()
self.assertTrue((v is True) or (v is False) )
self.assertArgIsBOOL(o.setUsesStrongWriteBarrier_, 0)
self.assertArgIsBOOL(o.setUsesWeakReadAndWriteBarriers_, 0)
v = o.usesWeakReadAndWriteBarriers()
self.assertTrue((v is True) or (v is False) )
if __name__ == "__main__":
main()
|
0777ed92c87502b8bce7533b8147dd1876c702ad
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/dglgo/dglgo/model/node_encoder/sgc.py
|
c46e4ba23ff86d1ef0b7a416aa3979bb6bdc47ce
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
sgc.py
|
import dgl.function as fn
import torch.nn as nn
import torch.nn.functional as F
from dgl.base import dgl_warning
from dgl.nn import SGConv
class SGC(nn.Module):
def __init__(self, data_info: dict, embed_size: int = -1, bias=True, k=2):
"""Simplifying Graph Convolutional Networks
Edge feature is ignored in this model.
Parameters
----------
data_info : dict
The information about the input dataset.
embed_size : int
The dimension of created embedding table. -1 means using original node embedding
bias : bool
If True, adds a learnable bias to the output. Default: ``True``.
k : int
            Number of hops :math:`K`. Default: ``2``.
"""
super().__init__()
self.data_info = data_info
self.out_size = data_info["out_size"]
self.embed_size = embed_size
if embed_size > 0:
self.embed = nn.Embedding(data_info["num_nodes"], embed_size)
in_size = embed_size
else:
in_size = data_info["in_size"]
self.sgc = SGConv(
in_size,
self.out_size,
k=k,
cached=True,
bias=bias,
norm=self.normalize,
)
def forward(self, g, node_feat, edge_feat=None):
if self.embed_size > 0:
dgl_warning(
"The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size."
)
h = self.embed.weight
else:
h = node_feat
return self.sgc(g, h)
@staticmethod
def normalize(h):
return (h - h.mean(0)) / (h.std(0) + 1e-5)
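# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the toy graph, feature size and
# data_info values below are made-up assumptions, not part of the module above.
if __name__ == "__main__":
    import dgl
    import torch
    # Toy homogeneous graph with self-loops so SGConv sees no 0-in-degree nodes.
    g = dgl.add_self_loop(dgl.rand_graph(10, 30))
    feat = torch.randn(10, 16)  # 10 nodes, 16-dim input features
    data_info = {"in_size": 16, "out_size": 3, "num_nodes": 10}
    model = SGC(data_info, embed_size=-1, k=2)  # use raw node features, no embedding table
    logits = model(g, feat)
    print(logits.shape)  # torch.Size([10, 3])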
|
d43f2e7108a50796fa7da13b7760e54cc5ea6f26
|
fa3312eb1e6ca712d7b4d9841e93e714864a238a
|
/apis/v1/boston.py
|
967f624bdac0be843123d091a724cb16ec372546
|
[
"MIT"
] |
permissive
|
shaz13/katana
|
9684ea69c1cffa5bb5a39e87672aa9038633cf29
|
1f1b49d66c62d81b1b5fd8be31fd3dcf8fdfd725
|
refs/heads/develop
| 2023-01-05T17:12:15.817365
| 2022-12-21T17:48:54
| 2022-12-21T17:48:54
| 246,100,187
| 120
| 35
| null | 2022-08-03T14:23:15
| 2020-03-09T17:31:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
boston.py
|
import uuid
from loguru import logger
from fastapi.routing import APIRouter
from apis.models.base import TrainingStatusResponse
from apis.models.house import BostonHouseRequestModel, BostonHouseResponseModel
from core.trainer import BostonHousePriceTrainerInstance
router = APIRouter(prefix="/boston")
# Load trained model. Dummy model being trained on startup...
logger.info("Training/Loading Boston house price model")
trainer = BostonHousePriceTrainerInstance()
boston_model = trainer.train()
logger.info("Training completed")
@router.post(
"/trainModel", tags=["boston"], response_model=TrainingStatusResponse
)
async def boston_train():
training_id = uuid.uuid1()
# Queue training / start training via RabbitMQ, Queue, etc..
# Add task here
# Track the id in a database
return {
"trainingId": str(training_id),
"status": "Training started",
}
@router.post(
"/predictPrice", tags=["boston"], response_model=BostonHouseResponseModel
)
async def boston_price_prediction(body: BostonHouseRequestModel):
request = body.dict()
payload = [x for x in request.values()]
prediction = boston_model.predict([payload])
result = {"predictionId": str(uuid.uuid1()), "predictedPrice": prediction}
return result
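# Illustrative wiring (hypothetical application module, not part of this file):
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(router)  # exposes POST /boston/trainModel and /boston/predictPrice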
|
a3daa8e11edfcf3b37c788589699ab1ec5622d66
|
5849e622803db1f388ad7dd24a1430abe86ecf05
|
/test/test_stats.py
|
c97e3c98106fb5aeb531811dbf62ce1c53dbdba4
|
[
"BSD-3-Clause"
] |
permissive
|
facebookresearch/PyTorch-BigGraph
|
8630ad17dd57574229741b26948ebdff1416d3b2
|
ab12fb79c6da4f5821421153c0e8a6e9ab6808b6
|
refs/heads/main
| 2023-08-26T17:59:38.324736
| 2023-08-25T22:31:20
| 2023-08-25T22:31:20
| 151,156,413
| 3,479
| 495
|
NOASSERTION
| 2022-10-06T20:31:55
| 2018-10-01T20:41:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
test_stats.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
from unittest import main, TestCase
from torchbiggraph.stats import Stats
class TestConfig(TestCase):
def test_sum(self):
a = Stats(my_int_metric=1, my_float_metric=0.1, count=1)
b = Stats(my_int_metric=2, my_float_metric=0.0, count=2)
c = Stats(my_int_metric=0, my_float_metric=0.2, count=2)
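        # 0.1 + 0.0 + 0.2 is 0.30000000000000004 in IEEE-754 double precision,
        # so the expected Stats below uses the exact float sum rather than 0.3.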
self.assertEqual(
Stats.sum([a, b, c]),
Stats(my_int_metric=3, my_float_metric=0.30000000000000004, count=5),
)
def test_average(self):
total = Stats(my_int_metric=9, my_float_metric=1.2, count=3)
self.assertEqual(
total.average(),
Stats(my_int_metric=3, my_float_metric=0.39999999999999997, count=3),
)
def test_str(self):
self.assertEqual(
str(Stats(my_int_metric=1, my_float_metric=0.2, count=3)),
"my_int_metric: 1 , my_float_metric: 0.2 , count: 3",
)
if __name__ == "__main__":
main()
|
a595222282cbd18d4fddc9ebf50ea6517290346d
|
374b3f27fe3cf032e88eccac5992c83eba0ad1b2
|
/tutorials/W3D5_NetworkCausality/solutions/W3D5_Tutorial1_Solution_fb7d91ed.py
|
ff9d40e298f51b3c40e669b5f1a23b9db4f89188
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
NeuromatchAcademy/course-content
|
e2fdca96bcbdc78afaa209e4e77438f44a56c82d
|
3d638d00f02d9fd269fa2aff7d062558afdcb126
|
refs/heads/main
| 2023-08-16T16:09:09.314153
| 2023-08-02T06:21:49
| 2023-08-02T06:21:49
| 262,856,980
| 2,678
| 1,079
|
CC-BY-4.0
| 2023-08-17T00:32:24
| 2020-05-10T19:09:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
W3D5_Tutorial1_Solution_fb7d91ed.py
|
def get_perturbed_connectivity_from_single_neuron(perturbed_X, selected_neuron):
"""
Computes the connectivity matrix from the selected neuron using differences in means.
Args:
perturbed_X (np.ndarray): the perturbed dynamical system matrix of shape
(n_neurons, timesteps)
selected_neuron (int): the index of the neuron we want to estimate connectivity for
Returns:
estimated_connectivity (np.ndarray): estimated connectivity for the selected neuron,
of shape (n_neurons,)
"""
  # Extract the perturbations of the selected neuron (every other timestep)
neuron_perturbations = perturbed_X[selected_neuron, ::2]
# Extract the observed outcomes of all the neurons (every other timestep)
all_neuron_output = perturbed_X[:, 1::2]
# Initialize estimated connectivity matrix
estimated_connectivity = np.zeros(n_neurons)
# Loop over neurons
for neuron_idx in range(n_neurons):
    # Get this output neuron's (neuron_idx) activity
this_neuron_output = all_neuron_output[neuron_idx, :]
# Get timesteps where the selected neuron == 0 vs == 1
one_idx = np.argwhere(neuron_perturbations == 1)
zero_idx = np.argwhere(neuron_perturbations == 0)
difference_in_means = np.mean(this_neuron_output[one_idx]) - np.mean(this_neuron_output[zero_idx])
estimated_connectivity[neuron_idx] = difference_in_means
return estimated_connectivity
# Initialize the system
n_neurons = 6
timesteps = 5000
selected_neuron = 1
# Simulate our perturbed dynamical system
perturbed_X = simulate_neurons_perturb(A, timesteps)
# Measure connectivity of neuron 1
estimated_connectivity = get_perturbed_connectivity_from_single_neuron(perturbed_X, selected_neuron)
with plt.xkcd():
plot_true_vs_estimated_connectivity(estimated_connectivity, A, selected_neuron)
|
f8b53b6b3e7298b431addcf096be2cd48c0e254a
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/mlrun/api/utils/db/backup.py
|
dc374418eb30665fca099c03094d982f9d2d2681
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 8,214
|
py
|
backup.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import pathlib
import shutil
import subprocess
import typing
import mlrun.api.utils.db.mysql
from mlrun import mlconf
from mlrun.utils import logger
class DBBackupUtil(object):
def __init__(
self,
backup_file_format: str = mlconf.httpdb.db.backup.file_format,
backup_rotation: bool = mlconf.httpdb.db.backup.use_rotation,
backup_rotation_limit: int = mlconf.httpdb.db.backup.rotation_limit,
) -> None:
self._backup_file_format = backup_file_format
self._backup_rotation = backup_rotation
self._backup_rotation_limit = backup_rotation_limit
def backup_database(self, backup_file_name: str = None) -> None:
backup_file_name = backup_file_name or self._generate_backup_file_name()
# ensure the backup directory exists
self._get_db_dir_path().mkdir(parents=True, exist_ok=True)
if ":memory:" in mlconf.httpdb.dsn:
return
elif "mysql" in mlconf.httpdb.dsn:
self._backup_database_mysql(backup_file_name)
else:
self._backup_database_sqlite(backup_file_name)
if self._backup_rotation:
self._rotate_backup()
def load_database_from_backup(
self, backup_file_name: str, new_backup_file_name: str = None
) -> None:
new_backup_file_name = new_backup_file_name or self._generate_backup_file_name()
backup_path = self._get_backup_file_path(backup_file_name)
if not backup_path or not os.path.isfile(backup_path):
raise RuntimeError(
f"Cannot load backup from {backup_file_name}, file doesn't exist"
)
# backup the current DB
self.backup_database(new_backup_file_name)
if ":memory:" in mlconf.httpdb.dsn:
return
elif "mysql" in mlconf.httpdb.dsn:
self._load_database_backup_mysql(backup_file_name)
else:
self._load_database_backup_sqlite(backup_file_name)
def _backup_database_sqlite(self, backup_file_name: str) -> None:
db_file_path = self._get_sqlite_db_file_path()
backup_path = self._get_backup_file_path(backup_file_name)
logger.debug(
"Backing up sqlite DB file",
db_file_path=db_file_path,
backup_path=backup_path,
)
shutil.copy2(db_file_path, backup_path)
def _load_database_backup_sqlite(self, backup_file_name: str) -> None:
db_file_path = self._get_sqlite_db_file_path()
backup_path = self._get_backup_file_path(backup_file_name)
logger.debug(
"Loading sqlite DB backup file",
db_file_path=db_file_path,
backup_path=backup_path,
)
shutil.copy2(backup_path, db_file_path)
def _backup_database_mysql(self, backup_file_name: str) -> None:
backup_path = self._get_backup_file_path(backup_file_name)
logger.debug("Backing up mysql DB data", backup_path=backup_path)
dsn_data = mlrun.api.utils.db.mysql.MySQLUtil.get_mysql_dsn_data()
self._run_shell_command(
"mysqldump --single-transaction --routines --triggers "
f"--max_allowed_packet={mlconf.httpdb.db.backup.max_allowed_packet} "
f"-h {dsn_data['host']} "
f"-P {dsn_data['port']} "
f"-u {dsn_data['username']} "
f"{dsn_data['database']} > {backup_path}"
)
def _load_database_backup_mysql(self, backup_file_name: str) -> None:
"""
To run this operation manually, you can either run the command below from the mlrun-api pod or
enter the mysql pod and run:
mysql -S /var/run/mysqld/mysql.sock -p mlrun < FILE_PATH
"""
backup_path = self._get_backup_file_path(backup_file_name)
logger.debug(
"Loading mysql DB backup data",
backup_path=backup_path,
)
dsn_data = mlrun.api.utils.db.mysql.MySQLUtil.get_mysql_dsn_data()
self._run_shell_command(
"mysql "
f"-h {dsn_data['host']} "
f"-P {dsn_data['port']} "
f"-u {dsn_data['username']} "
f"{dsn_data['database']} < {backup_path}"
)
def _rotate_backup(self) -> None:
db_dir_path = self._get_db_dir_path()
dir_content = os.listdir(db_dir_path)
backup_files = []
for file_name in dir_content:
try:
date_metadata = datetime.datetime.strptime(
file_name, self._backup_file_format
)
except ValueError:
continue
backup_files.append((file_name, date_metadata))
if len(backup_files) <= self._backup_rotation_limit:
return
backup_files = sorted(backup_files, key=lambda file_data: file_data[1])
files_to_delete = [
file_data[0] for file_data in backup_files[: -self._backup_rotation_limit]
]
logger.debug("Rotating old backup files", files_to_delete=files_to_delete)
for file_name in files_to_delete:
try:
os.remove(db_dir_path / file_name)
except FileNotFoundError:
logger.debug(
"Backup file doesn't exist, skipping...", file_name=file_name
)
def _generate_backup_file_name(self) -> str:
return datetime.datetime.now(tz=datetime.timezone.utc).strftime(
self._backup_file_format
)
def _get_backup_file_path(
self, backup_file_name: str
) -> typing.Optional[pathlib.Path]:
if ":memory:" in mlconf.httpdb.dsn:
return
return self._get_db_dir_path() / backup_file_name
def _get_db_dir_path(self) -> typing.Optional[pathlib.Path]:
if ":memory:" in mlconf.httpdb.dsn:
return
elif "mysql" in mlconf.httpdb.dsn:
db_dir_path = pathlib.Path(mlconf.httpdb.dirpath) / "mysql"
else:
db_file_path = self._get_sqlite_db_file_path()
db_dir_path = pathlib.Path(os.path.dirname(db_file_path))
return db_dir_path
@staticmethod
def _get_sqlite_db_file_path() -> str:
"""
Get the db file path from the dsn.
Converts the dsn to the file path. e.g.:
sqlite:////mlrun/db/mlrun.db?check_same_thread=false -> /mlrun/db/mlrun.db
if mysql is used returns empty string
"""
return mlconf.httpdb.dsn.split("?")[0].split("sqlite:///")[-1]
@staticmethod
def _run_shell_command(command: str) -> int:
logger.debug(
"Running shell command",
command=command,
)
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=True,
)
stdout = process.stdout.read()
stderr = process.stderr.read()
return_code = process.wait()
if return_code != 0:
logger.error(
"Failed running shell command",
command=command,
stdout=stdout,
stderr=stderr,
exit_status=return_code,
)
raise RuntimeError(
f"Got non-zero return code ({return_code}) on running shell command: {command}"
)
logger.debug(
"Ran command successfully",
command=command,
stdout=stdout,
stderr=stderr,
exit_status=return_code,
)
return return_code
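# Illustrative usage (hypothetical backup name; actual behaviour is driven by the
# mlconf.httpdb.dsn and mlconf.httpdb.db.backup settings):
#     backup_util = DBBackupUtil()
#     backup_util.backup_database()                         # write a timestamped backup
#     backup_util.load_database_from_backup("some-backup")  # back up current DB, then restore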
|
7600914a5ec54b52cf5d2b9dda231cd3f267239d
|
a641c7b07e91024f166ee25ee2d07874a704f40c
|
/train/train_TSP_edge_classification.py
|
180fb54390fb0245c2c67f007b60715dcae08eac
|
[
"MIT"
] |
permissive
|
graphdeeplearning/benchmarking-gnns
|
c901c87bf9441ff6a8f7427b2d0e3f5bcfd9489b
|
b6c407712fa576e9699555e1e035d1e327ccae6c
|
refs/heads/master
| 2023-07-31T21:35:04.971051
| 2022-05-10T13:22:20
| 2022-05-10T13:22:20
| 244,534,808
| 2,340
| 468
|
MIT
| 2023-06-22T04:03:54
| 2020-03-03T03:42:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,021
|
py
|
train_TSP_edge_classification.py
|
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from train.metrics import binary_f1_score
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
model.train()
epoch_loss = 0
epoch_train_f1 = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_graphs = batch_graphs.to(device)
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
optimizer.zero_grad()
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_f1 += binary_f1_score(batch_scores, batch_labels)
epoch_loss /= (iter + 1)
epoch_train_f1 /= (iter + 1)
return epoch_loss, epoch_train_f1, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_f1 = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
batch_graphs = batch_graphs.to(device)
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_labels = batch_labels.to(device)
batch_scores = model.forward(batch_graphs, batch_x, batch_e)
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_f1 += binary_f1_score(batch_scores, batch_labels)
epoch_test_loss /= (iter + 1)
epoch_test_f1 /= (iter + 1)
return epoch_test_loss, epoch_test_f1
"""
For WL-GNNs
"""
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
model.train()
epoch_loss = 0
epoch_train_f1 = 0
nb_data = 0
gpu_mem = 0
optimizer.zero_grad()
for iter, (x_no_edge_feat, x_with_edge_feat, labels, edge_list) in enumerate(data_loader):
if x_no_edge_feat is not None:
x_no_edge_feat = x_no_edge_feat.to(device)
if x_with_edge_feat is not None:
x_with_edge_feat = x_with_edge_feat.to(device)
labels = labels.to(device)
edge_list = edge_list[0].to(device), edge_list[1].to(device)
scores = model.forward(x_no_edge_feat, x_with_edge_feat, edge_list)
loss = model.loss(scores, labels)
loss.backward()
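        # Gradient accumulation: the dense WL-GNN loader yields one graph per
        # iteration, so the optimizer only steps once every batch_size iterations.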
if not (iter%batch_size):
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.detach().item()
epoch_train_f1 += binary_f1_score(scores, labels)
epoch_loss /= (iter + 1)
epoch_train_f1 /= (iter + 1)
return epoch_loss, epoch_train_f1, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_f1 = 0
nb_data = 0
with torch.no_grad():
for iter, (x_no_edge_feat, x_with_edge_feat, labels, edge_list) in enumerate(data_loader):
if x_no_edge_feat is not None:
x_no_edge_feat = x_no_edge_feat.to(device)
if x_with_edge_feat is not None:
x_with_edge_feat = x_with_edge_feat.to(device)
labels = labels.to(device)
edge_list = edge_list[0].to(device), edge_list[1].to(device)
scores = model.forward(x_no_edge_feat, x_with_edge_feat, edge_list)
loss = model.loss(scores, labels)
epoch_test_loss += loss.detach().item()
epoch_test_f1 += binary_f1_score(scores, labels)
epoch_test_loss /= (iter + 1)
epoch_test_f1 /= (iter + 1)
return epoch_test_loss, epoch_test_f1
|
6c97938be872748aa22944a8360ee1676ca1cd25
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/print__hprof_or_big_size_file__notify_with_MessageBox.py
|
a07b65b5676ba051139643cf723fd510a39f6e63
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
print__hprof_or_big_size_file__notify_with_MessageBox.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import re
import time
from pathlib import Path
from PyQt5.QtWidgets import QApplication, QMessageBox
from PyQt5.QtCore import Qt
from print__hprof_or_big_size_file import find_files_by_dirs, DIRS
if __name__ == "__main__":
app = QApplication([])
while True:
result = find_files_by_dirs(DIRS)
if not result:
continue
text = f"Files .hprof ({len(result)}):\n" + "\n".join(result)
msg_box = QMessageBox(QMessageBox.Information, "Found .hprof!", text)
msg_box.setTextInteractionFlags(Qt.TextSelectableByMouse)
remove_all_files_button = msg_box.addButton(
"Remove all files", QMessageBox.DestructiveRole
)
msg_box.addButton(QMessageBox.Ok)
msg_box.exec()
if msg_box.clickedButton() == remove_all_files_button:
for file_name in result:
# "C:\DEV\trunk\java_pid12636.hprof" 6.1 GB (6603419857 bytes) -> C:\DEV\trunk\java_pid12636.hprof
m = re.search('"(.+?)"', file_name)
if m:
file_name = m.group(1)
try:
Path(file_name).unlink()
except Exception as e:
QMessageBox.warning(
None, "Warning", f"Error while removed {file_name!r}: {e}"
)
time.sleep(5 * 60 * 60)
|
a35beae913c0383d29a1617079ace45585180d3a
|
360ae1188ad79e71ccc72da0b9ae709bda678f91
|
/ryu/services/protocols/bgp/core.py
|
0f6fe400b720cc15e4378fc2f89a51b5648abc4c
|
[
"Apache-2.0"
] |
permissive
|
faucetsdn/ryu
|
47b3523e7ccb381f3bdf2877a3f9f01cb1876054
|
d6cda4f427ff8de82b94c58aa826824a106014c2
|
refs/heads/master
| 2023-09-05T06:37:21.991029
| 2022-06-09T23:09:40
| 2022-06-09T23:09:40
| 2,945,007
| 385
| 215
|
Apache-2.0
| 2022-11-13T10:50:25
| 2011-12-09T03:43:50
|
Python
|
UTF-8
|
Python
| false
| false
| 18,825
|
py
|
core.py
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Core of BGPSpeaker.
Provides CoreService which is responsible for establishing bgp sessions with
peers and maintains VRFs and Global tables.
"""
import logging
import netaddr
import socket
from ryu.lib.packet.bgp import BGP_ERROR_CEASE
from ryu.lib.packet.bgp import BGP_ERROR_SUB_CONNECTION_RESET
from ryu.lib.packet.bgp import BGP_ERROR_SUB_CONNECTION_COLLISION_RESOLUTION
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.lib.packet.bgp import BGP_ATTR_ORIGIN_INCOMPLETE
from ryu.services.protocols.bgp.base import Activity
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.base import CORE_ERROR_CODE
from ryu.services.protocols.bgp.constants import STD_BGP_SERVER_PORT_NUM
from ryu.services.protocols.bgp import core_managers
from ryu.services.protocols.bgp.model import FlexinetOutgoingRoute
from ryu.services.protocols.bgp.protocol import Factory
from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
from ryu.services.protocols.bgp.speaker import BgpProtocol
from ryu.services.protocols.bgp.utils.rtfilter import RouteTargetManager
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from ryu.services.protocols.bgp.utils import stats
from ryu.services.protocols.bgp.bmp import BMPClient
from ryu.lib import sockopt
from ryu.lib import ip
LOG = logging.getLogger('bgpspeaker.core')
# Interface IP address on which to run bgp server. Core service listens on all
# interfaces of the host on port 179 - standard bgp port.
CORE_IP = '::'
# Requirements dictate that the Origin attribute be incomplete
EXPECTED_ORIGIN = BGP_ATTR_ORIGIN_INCOMPLETE
@add_bgp_error_metadata(code=CORE_ERROR_CODE, sub_code=1,
def_desc='Unknown error occurred related to core.')
class BgpCoreError(BGPSException):
"""Base exception related to all tables and peer management.
"""
pass
class CoreService(Factory, Activity):
"""A service that maintains eBGP/iBGP sessions with BGP peers.
Two instances of this class don't share any BGP state with each
other. Manages peers, tables for various address-families, etc.
"""
protocol = BgpProtocol
def __init__(self, common_conf, neighbors_conf, vrfs_conf):
self._common_config = common_conf
self._neighbors_conf = neighbors_conf
self._vrfs_conf = vrfs_conf
Activity.__init__(self, name='core_service')
self._signal_bus = BgpSignalBus()
self._init_signal_listeners()
self._rt_mgr = RouteTargetManager(self, neighbors_conf, vrfs_conf)
self._table_manager = core_managers.TableCoreManager(
self, common_conf
)
self._importmap_manager = core_managers.ImportMapManager()
# Autonomous system number of this BGP speaker.
self._asn = self._common_config.local_as
self._peer_manager = core_managers.PeerManager(
self,
self._neighbors_conf,
)
# Initialize sink for flexinet-peers
self._sinks = set()
self._conf_manager = core_managers.ConfigurationManager(
self, common_conf, vrfs_conf, neighbors_conf
)
# Register Flexinet peer sink
from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER
self.register_flexinet_sink(NET_CONTROLLER)
# State per route family
# Key: RouteFamily
# Value: BgpInstanceRf
self.rf_state = {}
# Protocol factories for pro-active and re-active bgp-sessions.
self.client_factory = None
self.server_factory = None
# Key: RD:Next_Hop
# Value: label
self._next_hop_label = {}
# BgpProcessor instance (initialized during start)
self._bgp_processor = None
# BMP clients key: (host, port) value: BMPClient instance
self.bmpclients = {}
def _init_signal_listeners(self):
self._signal_bus.register_listener(
BgpSignalBus.BGP_DEST_CHANGED,
lambda _, dest: self.enqueue_for_bgp_processing(dest)
)
self._signal_bus.register_listener(
BgpSignalBus.BGP_VRF_REMOVED,
lambda _, route_dist: self.on_vrf_removed(route_dist)
)
self._signal_bus.register_listener(
BgpSignalBus.BGP_VRF_ADDED,
lambda _, vrf_conf: self.on_vrf_added(vrf_conf)
)
self._signal_bus.register_listener(
BgpSignalBus.BGP_VRF_STATS_CONFIG_CHANGED,
lambda _, vrf_conf: self.on_stats_config_change(vrf_conf)
)
@property
def router_id(self):
return self._common_config.router_id
@property
def global_interested_rts(self):
return self._rt_mgr.global_interested_rts
@property
def asn(self):
return self._asn
@property
def table_manager(self):
return self._table_manager
@property
def importmap_manager(self):
return self._importmap_manager
@property
def peer_manager(self):
return self._peer_manager
@property
def rt_manager(self):
return self._rt_mgr
@property
def signal_bus(self):
return self._signal_bus
def enqueue_for_bgp_processing(self, dest):
return self._bgp_processor.enqueue(dest)
def on_vrf_removed(self, route_dist):
# Remove stats timer linked with this vrf.
vrf_stats_timer = self._timers.get(route_dist)
if vrf_stats_timer:
vrf_stats_timer.stop()
del self._timers[route_dist]
def on_vrf_added(self, vrf_conf):
# Setup statistics timer.
rd = vrf_conf.route_dist
rf = vrf_conf.route_family
vrf_table = self._table_manager.get_vrf_table(rd, rf)
vrf_stats_timer = self._create_timer(
rd,
stats.log,
stats_source=vrf_table.get_stats_summary_dict
)
# Start statistics timer if applicable.
if vrf_conf.stats_log_enabled:
vrf_stats_timer.start(vrf_conf.stats_time)
def on_stats_config_change(self, vrf_conf):
vrf_stats_timer = self._timers.get(
vrf_conf.route_dist
)
vrf_stats_timer.stop()
vrf_stats_timer.start(vrf_conf.stats_time)
def _run(self, *args, **kwargs):
from ryu.services.protocols.bgp.processor import BgpProcessor
# Initialize bgp processor.
self._bgp_processor = BgpProcessor(self)
# Start BgpProcessor in a separate thread.
processor_thread = self._spawn_activity(self._bgp_processor)
# Pro-actively try to establish bgp-session with peers.
for peer in self._peer_manager.iterpeers:
self._spawn_activity(peer, self.start_protocol)
# Reactively establish bgp-session with peer by listening on
# the given server hosts and port for connection requests.
waiter = kwargs.pop('waiter')
waiter.set()
self.listen_sockets = {}
if self._common_config.bgp_server_port != 0:
for host in self._common_config.bgp_server_hosts:
server_thread, sockets = self._listen_tcp(
(host, self._common_config.bgp_server_port),
self.start_protocol)
self.listen_sockets.update(sockets)
server_thread.wait()
processor_thread.wait()
# ========================================================================
# RTC address family related utilities
# ========================================================================
def update_rtfilters(self):
"""Updates RT filters for each peer.
        Should be called when RT NLRIs have changed based on the setting.
Currently only used by `Processor` to update the RT filters after it
has processed a RT destination. If RT filter has changed for a peer we
call RT filter change handler.
"""
# Update RT filter for all peers
# TODO(PH): Check if getting this map can be optimized (if expensive)
new_peer_to_rtfilter_map = self._compute_rtfilter_map()
# If we have new best path for RT NLRI, we have to update peer RT
# filters and take appropriate action of sending them NLRIs for other
# address-families as per new RT filter if necessary.
for peer in self._peer_manager.iterpeers:
pre_rt_filter = self._rt_mgr.peer_to_rtfilter_map.get(peer, set())
curr_rt_filter = new_peer_to_rtfilter_map.get(peer, set())
old_rts = pre_rt_filter - curr_rt_filter
new_rts = curr_rt_filter - pre_rt_filter
# If interested RTs for a peer changes
if new_rts or old_rts:
LOG.debug('RT Filter for peer %s updated: '
'Added RTs %s, Removed Rts %s',
peer.ip_address, new_rts, old_rts)
self._on_update_rt_filter(peer, new_rts, old_rts)
# Update to new RT filters
self._peer_manager.set_peer_to_rtfilter_map(new_peer_to_rtfilter_map)
self._rt_mgr.peer_to_rtfilter_map = new_peer_to_rtfilter_map
LOG.debug('Updated RT filters: %s', self._rt_mgr.peer_to_rtfilter_map)
# Update interested RTs i.e. RTs on the path that will be installed
# into global tables
self._rt_mgr.update_interested_rts()
def _on_update_rt_filter(self, peer, new_rts, old_rts):
"""Handles update of peer RT filter.
Parameters:
- `peer`: (Peer) whose RT filter has changed.
- `new_rts`: (set) of new RTs that peer is interested in.
- `old_rts`: (set) of RTs that peers is no longer interested in.
"""
for table in self._table_manager._global_tables.values():
if table.route_family == RF_RTC_UC:
continue
self._spawn('rt_filter_chg_%s' % peer,
self._rt_mgr.on_rt_filter_chg_sync_peer,
peer, new_rts, old_rts, table)
LOG.debug('RT Filter change handler launched for route_family %s',
table.route_family)
def _compute_rtfilter_map(self):
"""Returns neighbor's RT filter (permit/allow filter based on RT).
Walks RT filter tree and computes current RT filters for each peer that
have advertised RT NLRIs.
Returns:
dict of peer, and `set` of rts that a particular neighbor is
interested in.
"""
rtfilter_map = {}
def get_neigh_filter(neigh):
neigh_filter = rtfilter_map.get(neigh)
# Lazy creation of neighbor RT filter
if neigh_filter is None:
neigh_filter = set()
rtfilter_map[neigh] = neigh_filter
return neigh_filter
# Check if we have to use all paths or just best path
if self._common_config.max_path_ext_rtfilter_all:
# We have to look at all paths for a RtDest
for rtcdest in self._table_manager.get_rtc_table().values():
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
# We ignore NC
if neigh is None:
continue
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
# We iterate over all destination of the RTC table and for iBGP
# peers we use all known paths' RTs for RT filter and for eBGP
# peers we only consider best-paths' RTs for RT filter
for rtcdest in self._table_manager.get_rtc_table().values():
path = rtcdest.best_path
# If this destination does not have any path, we continue
if not path:
continue
neigh = path.source
# Consider only eBGP peers and ignore NC
if neigh and neigh.is_ebgp_peer():
# For eBGP peers we use only best-path to learn RT filter
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
# For iBGP peers we use all known paths to learn RT filter
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
# We ignore NC, and eBGP peers
if neigh and not neigh.is_ebgp_peer():
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
return rtfilter_map
# ========================================================================
# Peer or Neighbor related handles/utilities.
# ========================================================================
def register_flexinet_sink(self, sink):
self._sinks.add(sink)
def unregister_flexinet_sink(self, sink):
self._sinks.remove(sink)
def update_flexinet_peers(self, path, route_dist):
for sink in self._sinks:
out_route = FlexinetOutgoingRoute(path, route_dist)
sink.enque_outgoing_msg(out_route)
def _set_password(self, address, password):
if ip.valid_ipv4(address):
family = socket.AF_INET
else:
family = socket.AF_INET6
for sock in self.listen_sockets.values():
if sock.family == family:
sockopt.set_tcp_md5sig(sock, address, password)
def on_peer_added(self, peer):
if peer._neigh_conf.password:
self._set_password(peer._neigh_conf.ip_address,
peer._neigh_conf.password)
if self.started:
self._spawn_activity(
peer, self.start_protocol
)
# We need to handle new RTC_AS
if peer.rtc_as != self.asn:
self._spawn(
'NEW_RTC_AS_HANDLER %s' % peer.rtc_as,
self._rt_mgr.update_rtc_as_set
)
def on_peer_removed(self, peer):
if peer._neigh_conf.password:
# setting zero length key means deleting the key
self._set_password(peer._neigh_conf.ip_address, '')
if peer.rtc_as != self.asn:
self._spawn(
'OLD_RTC_AS_HANDLER %s' % peer.rtc_as,
self._rt_mgr.update_rtc_as_set
)
def build_protocol(self, socket):
assert socket
        # Check if it's a reactive connection or a pro-active connection
_, remote_port = self.get_remotename(socket)
remote_port = int(remote_port)
is_reactive_conn = True
if remote_port == STD_BGP_SERVER_PORT_NUM:
is_reactive_conn = False
bgp_protocol = self.protocol(
socket,
self._signal_bus,
is_reactive_conn=is_reactive_conn
)
return bgp_protocol
def start_protocol(self, socket):
"""Handler of new connection requests on bgp server port.
Checks if new connection request is valid and starts new instance of
protocol.
"""
assert socket
peer_addr, peer_port = self.get_remotename(socket)
peer = self._peer_manager.get_by_addr(peer_addr)
bgp_proto = self.build_protocol(socket)
# We reject this connection request from peer:
# 1) If we have connection initiated by a peer that is not in our
# configuration.
# 2) If this neighbor is not enabled according to configuration.
if not peer or not peer.enabled:
LOG.debug('Closed connection %s %s:%s as it is not a recognized'
' peer.', 'from' if bgp_proto.is_reactive else 'to',
peer_addr, peer_port)
# Send connection rejected notification as per RFC
code = BGP_ERROR_CEASE
subcode = BGP_ERROR_SUB_CONNECTION_RESET
bgp_proto.send_notification(code, subcode)
elif bgp_proto.is_reactive and \
peer.connect_mode is CONNECT_MODE_ACTIVE:
LOG.debug('Closed connection from %s:%s as connect_mode is'
' configured ACTIVE.', peer_addr, peer_port)
# Send connection rejected notification as per RFC
code = BGP_ERROR_CEASE
subcode = BGP_ERROR_SUB_CONNECTION_RESET
bgp_proto.send_notification(code, subcode)
elif not (peer.in_idle() or peer.in_active() or peer.in_connect()):
LOG.debug('Closing connection to %s:%s as we have connection'
' in state other than IDLE or ACTIVE,'
' i.e. connection resolution',
peer_addr, peer_port)
# Send Connection Collision Resolution notification as per RFC.
code = BGP_ERROR_CEASE
subcode = BGP_ERROR_SUB_CONNECTION_COLLISION_RESOLUTION
bgp_proto.send_notification(code, subcode)
else:
bind_ip, bind_port = self.get_localname(socket)
peer._host_bind_ip = bind_ip
peer._host_bind_port = bind_port
self._spawn_activity(bgp_proto, peer)
def start_bmp(self, host, port):
if (host, port) in self.bmpclients:
bmpclient = self.bmpclients[(host, port)]
if bmpclient.started:
LOG.warning("bmpclient is already running for %s:%s",
host, port)
return False
bmpclient = BMPClient(self, host, port)
self.bmpclients[(host, port)] = bmpclient
self._spawn_activity(bmpclient)
return True
def stop_bmp(self, host, port):
if (host, port) not in self.bmpclients:
LOG.warning("no bmpclient is running for %s:%s", host, port)
return False
bmpclient = self.bmpclients[(host, port)]
bmpclient.stop()
|
2633f9818955d0fa0cd0afb254d68c2b1e01460d
|
e7149583d700b1c9633dff7395d21ca204f07b5c
|
/ihp-sg13g2/libs.tech/pycell/pmos_code.py
|
03c25cdc4583f19f64ffbe9bf9bb31cd68a47d7e
|
[
"Apache-2.0"
] |
permissive
|
IHP-GmbH/IHP-Open-PDK
|
380763a4a0c0f17bf3f0882a7710313efde2c282
|
bc266aae4b8636cc46b5d1b742bb7669a9fdf1b5
|
refs/heads/main
| 2023-09-01T01:20:48.883112
| 2023-08-24T12:28:59
| 2023-08-24T12:28:59
| 533,473,538
| 182
| 14
|
Apache-2.0
| 2023-09-13T12:37:56
| 2022-09-06T19:28:15
|
Python
|
UTF-8
|
Python
| false
| false
| 10,864
|
py
|
pmos_code.py
|
########################################################################
#
# Copyright 2023 IHP PDK Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
__version__ = '$Revision: #3 $'
from cni.dlo import *
from geometry import *
from thermal import *
from utility_functions import *
import math
class pmos(DloGen):
@classmethod
def defineParamSpecs(self, specs):
techparams = specs.tech.getTechParams()
CDFVersion = techparams['CDFVersion']
model = 'sg13_lv_pmos'
defL = techparams['pmos_defL']
defW = techparams['pmos_defW']
defNG = techparams['pmos_defNG']
minL = techparams['pmos_minL']
minW = techparams['pmos_minW']
specs('cdf_version', CDFVersion, 'CDF Version')
specs('Display', 'Selected', 'Display', ChoiceConstraint(['All', 'Selected']))
specs('model', model, 'Model name')
specs('w' , defW, 'Width')
specs('ws', eng_string(Numeric(defW)/Numeric(defNG)), 'SingleWidth')
specs('l' , defL, 'Length')
specs('Wmin', minW, 'Wmin')
specs('Lmin', minL, 'Lmin')
specs('ng', defNG, 'Number of Gates')
specs('m', '1', 'Multiplier')
specs('trise', '', 'Temp rise from ambient')
def setupParams(self, params):
# process parameter values entered by user
self.params = params
self.w = Numeric(params['w'])
self.l = Numeric(params['l'])
self.ng = Numeric(params['ng'])
def genLayout(self):
self.grid = self.tech.getGridResolution()
self.techparams = self.tech.getTechParams()
self.epsilon = self.techparams['epsilon1']
w = self.w
ng = self.ng
l = self.l
typ = 'P'
hv = False
ndiff_layer = Layer('Activ', 'drawing') # 1
pdiff_layer = Layer('Activ', 'drawing') # 1
poly_layer = Layer('GatPoly', 'drawing') # 5
locint_layer = Layer('Cont', 'drawing') # 6
metall_layer = Layer('Metal1', 'drawing') # 8
pdiffx_layer = Layer('pSD', 'drawing') # 14
well_layer = Layer('NWell', 'drawing') # 31
tgo_layer = Layer('ThickGateOx', 'drawing') # 44
textlayer = Layer('TEXT', 'drawing') # 63
endcap = self.techparams['M1_c1']
cont_size = self.techparams['Cnt_a']
cont_dist = self.techparams['Cnt_b']
cont_Activ_overRec = self.techparams['Cnt_c']
cont_metall_over = self.techparams['M1_c']
psd_pActiv_over = self.techparams['pSD_c']
nwell_pActiv_over = self.techparams['NW_c']
gatpoly_Activ_over = self.techparams['Gat_c']
gatpoly_cont_dist = self.techparams['Cnt_f']
smallw_gatpoly_cont_dist = cont_Activ_overRec+self.techparams['Gat_d']
psd_PFET_over = self.techparams['pSD_i']
pdiffx_poly_over_orth = 0.48
wmin = Numeric(self.techparams['pmos_minW'])
lmin = Numeric(self.techparams['pmos_minL'])
contActMin = 2*cont_Activ_overRec+cont_size
thGateOxGat = self.techparams['TGO_c']
thGateOxAct = self.techparams['TGO_a']
dbReplaceProp(self, 'pin#', 5)
w = w*1e6;
l = l*1e6;
ng = math.floor(Numeric(ng)+self.epsilon)
w = w/ng
w = GridFix(w)
l = GridFix(l)
# additional Text for label
if hv :
labelhv = 'HV'
else :
labelhv = ''
if w < contActMin-self.epsilon :
gatpoly_cont_dist = smallw_gatpoly_cont_dist
xdiff_beg = 0
ydiff_beg = 0
ydiff_end = w
if w < wmin-self.epsilon :
hiGetAttention()
print('Width < '+str(wmin))
w = wmin
if l < lmin-self.epsilon :
hiGetAttention()
print('Length < '+str(lmin))
l = lmin
if ng < 1 :
hiGetAttention()
print('Minimum one finger')
ng = 1
xanz = math.floor((w-2*cont_Activ_overRec+cont_dist)/(cont_size+cont_dist)+self.epsilon)
w1 = xanz*(cont_size+cont_dist)-cont_dist+cont_Activ_overRec+cont_Activ_overRec
xoffset = (w-w1)/2
xoffset = GridFix(xoffset)
diffoffset = 0
if w < contActMin :
xoffset = 0
diffoffset = (contActMin-w)/2
diffoffset = Snap(diffoffset)
# get the number of contacts
lcon = w-2*cont_Activ_overRec
distc = cont_size+cont_dist
ncont = math.floor((lcon+cont_dist-2*endcap)/distc + self.epsilon)
if zerop(ncont) :
ncont = 1
diff_cont_offset = GridFix((w-2*cont_Activ_overRec-ncont*cont_size-(ncont-1)*cont_dist)/2)
# draw the cont row
xcont_beg = xdiff_beg+cont_Activ_overRec
ycont_beg = ydiff_beg+cont_Activ_overRec
ycont_cnt = ycont_beg+diffoffset+diff_cont_offset
xcont_end = xcont_beg+cont_size
# draw contacts
# LI and Metall
contactArray(self, 0, locint_layer, xcont_beg, ydiff_beg, xcont_end, ydiff_end+diffoffset*2, 0, cont_Activ_overRec, cont_size, cont_dist)
# 30.01.08 GGa added block
# draw Metal rect
# calculate bot and top cont position
yMet1 = ycont_cnt-endcap
yMet2 = ycont_cnt+cont_size+(ncont-1)*distc +endcap
# is metal1 overlapping Activ?
yMet1 = min(yMet1, ydiff_beg+diffoffset)
yMet2 = max(yMet2, ydiff_end+diffoffset)
dbCreateRect(self, metall_layer, Box(xcont_beg-cont_metall_over, yMet1, xcont_end+cont_metall_over, yMet2))
if w > contActMin :
MkPin(self, 'S', 3, Box(xcont_beg-cont_metall_over, yMet1, xcont_end+cont_metall_over, yMet2), metall_layer)
else :
MkPin(self, 'S', 3, Box(xcont_beg-cont_metall_over, yMet1, xcont_end+cont_metall_over, yMet2), metall_layer)
if typ == 'N' :
dbCreateRect(self, ndiff_layer, Box(xcont_beg-cont_Activ_overRec, ycont_beg-cont_Activ_overRec, xcont_end+cont_Activ_overRec, ycont_beg+cont_size+cont_Activ_overRec))
else : # typ == 'P'
dbCreateRect(self, pdiff_layer, Box(xcont_beg-cont_Activ_overRec, ycont_beg-cont_Activ_overRec, xcont_end+cont_Activ_overRec, ycont_beg+cont_size+cont_Activ_overRec))
for i in range(1, int(ng)+1) :
# draw the poly line
xpoly_beg = xcont_end+gatpoly_cont_dist
ypoly_beg = ydiff_beg-gatpoly_Activ_over
xpoly_end = xpoly_beg+l
ypoly_end = ydiff_end+gatpoly_Activ_over
dbCreateRect(self, poly_layer, Box(xpoly_beg, ypoly_beg+diffoffset, xpoly_end, ypoly_end+diffoffset))
ihpAddThermalMosLayer(self, Box(xpoly_beg, ypoly_beg+diffoffset, xpoly_end, ypoly_end+diffoffset), True, 'pmos')
if i == 1 :
dbCreateLabel(self, textlayer, Point((xpoly_beg+xpoly_end)/2, (ypoly_beg+ypoly_end)/2+diffoffset), 'pmos'+labelhv, 'centerCenter', 'R90', Font.EURO_STYLE, 0.1)
if onep(i) :
MkPin(self, 'G', 2, Box(xpoly_beg, ypoly_beg+diffoffset, xpoly_end, ypoly_end+diffoffset), poly_layer)
# draw the second cont row
xcont_beg = xpoly_end+gatpoly_cont_dist
ycont_beg = ydiff_beg+cont_Activ_overRec
ycont_cnt = ycont_beg+diffoffset+diff_cont_offset
xcont_end = xcont_beg+cont_size
dbCreateRect(self, metall_layer, Box(xcont_beg-cont_metall_over, yMet1, xcont_end+cont_metall_over, yMet2))
# draw contacts
# LI and Metall
contactArray(self, 0, locint_layer, xcont_beg, ydiff_beg, xcont_end, ydiff_end+diffoffset*2, 0, cont_Activ_overRec, cont_size, cont_dist)
if onep(i) :
if w > contActMin :
MkPin(self, 'D', 1, Box(xcont_beg-cont_metall_over, yMet1, xcont_end+cont_metall_over, yMet2), metall_layer)
else :
MkPin(self, 'D', 1, Box(xcont_beg-cont_metall_over, yMet1, xcont_end+cont_metall_over, yMet2), metall_layer)
if typ == 'N' :
dbCreateRect(self, ndiff_layer, Box(xcont_beg-cont_Activ_overRec, ycont_beg-cont_Activ_overRec, xcont_end+cont_Activ_overRec, ycont_beg+cont_size+cont_Activ_overRec))
else :
dbCreateRect(self, pdiff_layer, Box(xcont_beg-cont_Activ_overRec, ycont_beg-cont_Activ_overRec, xcont_end+cont_Activ_overRec, ycont_beg+cont_size+cont_Activ_overRec))
# for i 1 ng
# now finish drawing the diffusion
xdiff_end = xcont_end+cont_Activ_overRec
if typ == 'N' :
dbCreateRect(self, ndiff_layer, Box(xdiff_beg, ydiff_beg+diffoffset, xdiff_end, ydiff_end+diffoffset))
else :
dbCreateRect(self, pdiff_layer, Box(xdiff_beg, ydiff_beg+diffoffset, xdiff_end, ydiff_end+diffoffset))
dbCreateRect(self, pdiffx_layer, Box(xdiff_beg-psd_pActiv_over, ypoly_beg-psd_PFET_over+gatpoly_Activ_over+diffoffset, xdiff_end+psd_pActiv_over, ypoly_end+psd_PFET_over-gatpoly_Activ_over+diffoffset))
# draw minimum nWell
nwell_offset = max(0, GridFix((contActMin-w)/2+0.5*self.grid))
dbCreateRect(self, well_layer, Box(xdiff_beg-nwell_pActiv_over, ydiff_beg-nwell_pActiv_over+diffoffset-nwell_offset, xdiff_end+nwell_pActiv_over, ydiff_end+nwell_pActiv_over+diffoffset+nwell_offset))
# B-Pin
MkPin(self, 'B', 4, Box(xcont_beg-cont_Activ_overRec, ycont_beg-cont_Activ_overRec, xcont_end+cont_Activ_overRec, ycont_beg+cont_size+cont_Activ_overRec), Layer('Substrate', 'drawing'))
# draw Thick Gate Oxide
if hv :
dbCreateRect(self, Layer('ThickGateOx', 'drawing'), Box(xdiff_beg-thGateOxAct, ydiff_beg-gatpoly_Activ_over-thGateOxGat, xdiff_end+thGateOxAct, ydiff_end+gatpoly_Activ_over+thGateOxGat))
|
3b627ac3aa89df6529055572f704b73dfdc62912
|
ed8b5913524eb93b7e653165310bad264679add9
|
/src/steam_network/friends_cache.py
|
949416a3f2eca08d87c22917629aab897cf3007d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ABaumher/galaxy-integration-steam
|
d413b013c96e0b5384c91d4b5bf334fc3dd1c390
|
9d2c8dbeeedc59253edc243bda0d92ac8f299935
|
refs/heads/master
| 2023-08-17T16:07:42.256731
| 2023-06-27T15:38:31
| 2023-06-27T15:38:31
| 640,713,928
| 339
| 11
|
NOASSERTION
| 2023-07-11T17:51:30
| 2023-05-15T00:54:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,832
|
py
|
friends_cache.py
|
import logging
import asyncio
from dataclasses import dataclass
from typing import Dict
from .protocol.steam_types import ProtoUserInfo
from .cache_proto import ProtoCache
logger = logging.getLogger(__name__)
@dataclass
class AvailableInfo:
personal_info: bool = False
state: bool = False
def ready(self):
return self.personal_info and self.state
class FriendsCache(ProtoCache):
def __init__(self):
super(FriendsCache, self).__init__()
self._pending_map: Dict[str, AvailableInfo] = {}
self._info_map: Dict[str, ProtoUserInfo] = {}
self._nicknames_parsed = asyncio.Event()
self._nicknames = {}
async def wait_nicknames_ready(self, timeout=None):
try:
await asyncio.wait_for(self._nicknames_parsed.wait(), timeout)
except asyncio.TimeoutError:
logger.info("Timed out waiting for nicknames to get ready")
def update_nicknames(self, nicknames):
self._nicknames = nicknames
self._nicknames_parsed.set()
def get_nicknames(self):
return self._nicknames
def _reset(self, user_ids):
new = set(user_ids)
current = set(self._info_map.keys())
for user_id in current - new:
self._remove(user_id)
for user_id in new - current:
self._add(user_id)
def _add(self, user_id):
if user_id in self._info_map:
return
self._pending_map[user_id] = AvailableInfo()
self._info_map[user_id] = ProtoUserInfo()
def _remove(self, user_id):
pending = self._pending_map.pop(user_id, None)
user_info = self._info_map.pop(user_id, None)
if user_info is None:
return # user is not in cache
if pending is None:
# removed ready user
if self.removed_handler is not None:
self.removed_handler(user_id)
async def update(self, user_id, user_info: ProtoUserInfo):
current_info = self._info_map.get(user_id)
if current_info is None:
return # not a friend, ignoring
changed = current_info.update(user_info)
available_info = self._pending_map.get(user_id)
if available_info is None:
if changed and self.updated_handler is not None:
await self.updated_handler(user_id, current_info)
else:
if user_info.name is not None:
available_info.personal_info = True
if user_info.state is not None:
available_info.state = True
if available_info.ready():
del self._pending_map[user_id]
if self.added_handler is not None:
self.added_handler(user_id, current_info)
self._update_ready_state() # if pending is empty
|
2154cd34e5950f89f74aa858a69d6b390b71aed7
|
9335c48ecf8e8c003d014b8fc2a2fe1ad22ea379
|
/pytorch_toolbelt/modules/encoders/common.py
|
f7979257d5402819396f4db6c96aeea4f64472f9
|
[
"MIT"
] |
permissive
|
BloodAxe/pytorch-toolbelt
|
7f86f3f3f9a7cdcb8d49a5f45882f7d16556c535
|
75e6f467472702acbbb7e690d8cbf5496b859c29
|
refs/heads/develop
| 2023-08-28T18:57:51.377858
| 2023-08-27T09:50:01
| 2023-08-27T09:50:01
| 175,851,515
| 1,503
| 126
|
MIT
| 2023-08-19T14:23:55
| 2019-03-15T16:02:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,109
|
py
|
common.py
|
"""Wrappers for different backbones for models that follows Encoder-Decoder architecture.
Encoders listed here provide an easy way to swap the backbone of a classification/segmentation/detection model.
"""
import math
import warnings
from typing import List, Union, Tuple, Iterable, Any
import torch
from torch import nn, Tensor
__all__ = ["EncoderModule", "_take", "_take_ints", "_take_tensors", "make_n_channel_input"]
from pytorch_toolbelt.utils.support import pytorch_toolbelt_deprecated
from pytorch_toolbelt.modules.interfaces import (
HasOutputFeaturesSpecification,
FeatureMapsSpecification,
AbstractEncoder,
)
def _take(elements: List[Any], indexes: List[int]) -> List[Any]:
selected = []
for i in indexes:
selected.append(elements[i])
return selected
def _take_ints(elements: List[int], indexes: List[int]) -> List[int]:
selected: List[int] = []
for i in indexes:
selected.append(elements[i])
return selected
def _take_tensors(elements: List[Tensor], indexes: List[int]) -> List[Tensor]:
selected: List[Tensor] = []
for i in indexes:
selected.append(elements[i])
return selected
def make_n_channel_input_conv(
conv: Union[nn.Conv1d, nn.Conv2d, nn.Conv3d], in_channels: int, mode="auto", **kwargs
) -> Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]:
"""
Create convolution block with same parameters and desired number of channels.
Args:
conv: Input nn.Conv2D object to copy settings/weights from
in_channels: Desired number of input channels
mode: Accepted for API compatibility; not used by this implementation.
**kwargs: Optional overrides for Conv2D parameters
"""
conv_cls = conv.__class__
if conv.in_channels == in_channels:
warnings.warn("make_n_channel_input call is spurious")
return conv
new_conv = conv_cls(
in_channels,
out_channels=conv.out_channels,
kernel_size=kwargs.get("kernel_size", conv.kernel_size),
stride=kwargs.get("stride", conv.stride),
padding=kwargs.get("padding", conv.padding),
dilation=kwargs.get("dilation", conv.dilation),
groups=kwargs.get("groups", conv.groups),
bias=kwargs.get("bias", conv.bias is not None),
padding_mode=kwargs.get("padding_mode", conv.padding_mode),
)
w = conv.weight
if in_channels > conv.in_channels:
n = math.ceil(in_channels / float(conv.in_channels))
w = torch.cat([w] * n, dim=1)
w = w[:, :in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
else:
w = w[:, 0:in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
return new_conv
def make_n_channel_input(conv: nn.Module, in_channels: int, mode="auto", **kwargs) -> nn.Module:
"""
Create convolution block with same parameters and desired number of channels.
Args:
conv: Input nn.Conv2D object to copy settings/weights from
in_channels: Desired number of input channels
mode: Forwarded to make_n_channel_input_conv (where it is currently unused).
**kwargs: Optional overrides for Conv2D parameters
"""
if isinstance(conv, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
return make_n_channel_input_conv(conv, in_channels=in_channels, mode=mode, **kwargs)
raise ValueError(f"Unsupported class {conv.__class__.__name__}")
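# Usage sketch (added for illustration, not part of the original module):
# adapt a stem convolution that expects 3-channel RGB input so it accepts
# 4 channels instead. The existing kernel is repeated cyclically and cropped
# to fill the extra channel, as done in make_n_channel_input_conv above.
#
#   rgb_conv = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
#   rgbd_conv = make_n_channel_input(rgb_conv, in_channels=4)
#   assert rgbd_conv.weight.shape == (64, 4, 7, 7)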
class EncoderModule(AbstractEncoder):
__constants__ = ["_layers", "_output_strides", "_output_filters"]
def __init__(self, channels: List[int], strides: List[int], layers: List[int]):
super().__init__()
if len(channels) != len(strides):
raise ValueError("Number of channels must be equal to number of strides")
self._layers = list(layers)
self.output_spec = FeatureMapsSpecification(
channels=_take_ints(channels, layers), strides=_take_ints(strides, layers)
)
def forward(self, x: Tensor) -> List[Tensor]: # skipcq: PYL-W0221
output_features = []
for layer in self.encoder_layers:
output = layer(x)
output_features.append(output)
x = output
# Return only features that were requested
return _take_tensors(output_features, self._layers)
@property
@torch.jit.unused
def channels(self) -> Tuple[int, ...]:
return self.get_output_spec().channels
@property
@torch.jit.unused
def strides(self) -> Tuple[int, ...]:
return self.get_output_spec().strides
@torch.jit.unused
def set_trainable(self, trainable):
for param in self.parameters():
param.requires_grad = bool(trainable)
@torch.jit.unused
def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
"""
Change number of channels expected in the input tensor. By default,
all encoders assume 3-channel image in BCHW notation with C=3.
This method changes first convolution to have user-defined number of
channels as input.
"""
raise NotImplementedError
@torch.jit.unused
def get_output_spec(self) -> FeatureMapsSpecification:
return self.output_spec
|
44d51c03351faa6eb840e54fd8845dc7b91362b0
|
6f0ceee714bccf2a89c34a06aabd3bcb781a2fa4
|
/python/mxnet/gluon/contrib/estimator/batch_processor.py
|
aa5adbfdea5f05ea4a7d62c4e98e136a850fd170
|
[
"Apache-2.0",
"MIT",
"Unlicense",
"BSL-1.0",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"OFL-1.0",
"BSD-2-Clause-Views",
"Zlib"
] |
permissive
|
yajiedesign/mxnet
|
5a495fd06dd1730c17d2d27d7e46c8a770847f17
|
8e5a16cf673db5aceb48d2cf7a0fc1abd0ee5e51
|
refs/heads/master
| 2021-03-30T22:37:18.603396
| 2020-10-23T06:40:17
| 2020-10-23T06:40:17
| 43,763,550
| 214
| 59
|
Apache-2.0
| 2020-06-01T23:31:15
| 2015-10-06T16:36:40
|
C++
|
UTF-8
|
Python
| false
| false
| 3,925
|
py
|
batch_processor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-argument, too-many-ancestors
"""Gluon Batch Processor for Estimators"""
from ...utils import split_and_load
from .... import autograd
__all__ = ['BatchProcessor']
class BatchProcessor(object):
"""BatchProcessor Class for plug and play fit_batch & evaluate_batch
During training or validation, data are divided into minibatches for processing. This
class aims to provide hooks for training or validating on a minibatch of data. Users
may provide customized fit_batch() and evaluate_batch() methods by inheriting from
this class and overriding class methods.
:py:class:`BatchProcessor` can be used to replace fit_batch() and evaluate_batch()
in the base estimator class
"""
def __init__(self):
pass
def _get_data_and_label(self, batch, ctx, batch_axis=0):
data = batch[0]
label = batch[1]
data = split_and_load(data, ctx_list=ctx, batch_axis=batch_axis)
label = split_and_load(label, ctx_list=ctx, batch_axis=batch_axis)
return data, label
def evaluate_batch(self, estimator,
val_batch,
batch_axis=0):
"""Evaluate the estimator model on a batch of validation data.
Parameters
----------
estimator : Estimator
Reference to the estimator
val_batch : tuple
Data and label of a batch from the validation data loader.
batch_axis : int, default 0
Batch axis to split the validation data into devices.
"""
data, label = self._get_data_and_label(val_batch, estimator.context, batch_axis)
pred = [estimator.val_net(x) for x in data]
loss = [estimator.val_loss(y_hat, y) for y_hat, y in zip(pred, label)]
return data, label, pred, loss
def fit_batch(self, estimator,
train_batch,
batch_axis=0):
"""Trains the estimator model on a batch of training data.
Parameters
----------
estimator : Estimator
Reference to the estimator
train_batch : tuple
Data and label of a batch from the training data loader.
batch_axis : int, default 0
Batch axis to split the training data into devices.
Returns
-------
data: List of NDArray
Sharded data from the batch. Data is sharded with
`gluon.split_and_load`.
label: List of NDArray
Sharded label from the batch. Labels are sharded with
`gluon.split_and_load`.
pred: List of NDArray
Prediction on each of the sharded inputs.
loss: List of NDArray
Loss on each of the sharded inputs.
"""
data, label = self._get_data_and_label(train_batch, estimator.context, batch_axis)
with autograd.record():
pred = [estimator.net(x) for x in data]
loss = [estimator.loss(y_hat, y) for y_hat, y in zip(pred, label)]
for l in loss:
l.backward()
return data, label, pred, loss
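# A minimal customization sketch (added for illustration, not part of the
# original file): the training hook can be swapped by subclassing
# BatchProcessor and overriding fit_batch(), reusing the stock data splitting.
# The 0.5 loss scale below is purely illustrative.
class _ScaledLossBatchProcessor(BatchProcessor):
    """Sketch: identical to BatchProcessor.fit_batch, but scales the loss."""
    def fit_batch(self, estimator, train_batch, batch_axis=0):
        data, label = self._get_data_and_label(train_batch, estimator.context, batch_axis)
        with autograd.record():
            pred = [estimator.net(x) for x in data]
            loss = [0.5 * estimator.loss(y_hat, y) for y_hat, y in zip(pred, label)]
        for l in loss:
            l.backward()
        return data, label, pred, loss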
|
8708f8605ac8059caa6b27c9f4ce4df3377a73e0
|
9ed4d46aedd4d4acadb48d610e940594b5b7b3fd
|
/backtracking/power_sum.py
|
fcf1429f8570ccc3c60cfa528b2d41edf8d59c88
|
[
"MIT"
] |
permissive
|
TheAlgorithms/Python
|
7596a0e236ed12a61f9db19a7ea68309779cc85b
|
421ace81edb0d9af3a173f4ca7e66cc900078c1d
|
refs/heads/master
| 2023-09-01T17:32:20.190949
| 2023-08-29T13:18:10
| 2023-08-29T13:18:10
| 63,476,337
| 184,217
| 48,615
|
MIT
| 2023-09-14T02:05:29
| 2016-07-16T09:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,676
|
py
|
power_sum.py
|
"""
Problem source: https://www.hackerrank.com/challenges/the-power-sum/problem
Find the number of ways that a given integer X can be expressed as the sum
of the Nth powers of unique natural numbers. For example, if X=13 and N=2,
we have to find all combinations of unique squares adding up to 13.
The only solution is 2^2+3^2. Constraints: 1<=X<=1000, 2<=N<=10.
"""
from math import pow
def backtrack(
needed_sum: int,
power: int,
current_number: int,
current_sum: int,
solutions_count: int,
) -> tuple[int, int]:
"""
>>> backtrack(13, 2, 1, 0, 0)
(0, 1)
>>> backtrack(100, 2, 1, 0, 0)
(0, 3)
>>> backtrack(100, 3, 1, 0, 0)
(0, 1)
>>> backtrack(800, 2, 1, 0, 0)
(0, 561)
>>> backtrack(1000, 10, 1, 0, 0)
(0, 0)
>>> backtrack(400, 2, 1, 0, 0)
(0, 55)
>>> backtrack(50, 1, 1, 0, 0)
(0, 3658)
"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
i_to_n = int(pow(current_number, power))
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
current_sum, solutions_count = backtrack(
needed_sum, power, current_number + 1, current_sum, solutions_count
)
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
current_sum, solutions_count = backtrack(
needed_sum, power, current_number + 1, current_sum, solutions_count
)
return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
"""
>>> solve(13, 2)
1
>>> solve(100, 2)
3
>>> solve(100, 3)
1
>>> solve(800, 2)
561
>>> solve(1000, 10)
0
>>> solve(400, 2)
55
>>> solve(50, 1)
Traceback (most recent call last):
...
ValueError: Invalid input
needed_sum must be between 1 and 1000, power between 2 and 10.
>>> solve(-10, 5)
Traceback (most recent call last):
...
ValueError: Invalid input
needed_sum must be between 1 and 1000, power between 2 and 10.
"""
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
"Invalid input\n"
"needed_sum must be between 1 and 1000, power between 2 and 10."
)
return backtrack(needed_sum, power, 1, 0, 0)[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
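# Worked example (added for illustration): solve(100, 2) == 3 because 100 has
# exactly three decompositions into sums of distinct squares:
#   10^2,  6^2 + 8^2,  and  1^2 + 3^2 + 4^2 + 5^2 + 7^2.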
|
9a57cfa13b76c1feeb49c4aa5b5753828da31c85
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/tests/test_mirror_users.py
|
2b4049434c2def9b9d75d3353e11a783f50b5ec4
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 6,764
|
py
|
test_mirror_users.py
|
from typing import Any, List
from unittest import mock
from django.db import IntegrityError
from django.utils.timezone import now as timezone_now
from zerver.actions.message_send import create_mirror_user_if_needed
from zerver.lib.create_user import create_user_profile
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import reset_email_visibility_to_everyone_in_zulip_realm
from zerver.models import UserProfile, get_client, get_realm, get_user
from zerver.views.message_send import InvalidMirrorInputError, create_mirrored_message_users
class MirroredMessageUsersTest(ZulipTestCase):
def test_invalid_client(self) -> None:
user = self.example_user("hamlet")
sender = user
recipients: List[str] = []
recipient_type_name = "private"
client = get_client("banned_mirror")
with self.assertRaises(InvalidMirrorInputError):
create_mirrored_message_users(
client, user, recipients, sender.email, recipient_type_name
)
def test_invalid_email(self) -> None:
invalid_email = "alice AT example.com"
recipients = [invalid_email]
# We use an MIT user here to maximize code coverage
user = self.mit_user("starnine")
sender = user
recipient_type_name = "private"
for client_name in ["zephyr_mirror", "irc_mirror", "jabber_mirror"]:
client = get_client(client_name)
with self.assertRaises(InvalidMirrorInputError):
create_mirrored_message_users(
client, user, recipients, sender.email, recipient_type_name
)
@mock.patch(
"DNS.dnslookup",
return_value=[["sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh"]],
)
def test_zephyr_mirror_new_recipient(self, ignored: object) -> None:
"""Test mirror dummy user creation for direct message recipients"""
user = self.mit_user("starnine")
sender = self.mit_user("sipbtest")
new_user_email = "bob_the_new_user@mit.edu"
new_user_realm = get_realm("zephyr")
recipients = [user.email, new_user_email]
recipient_type_name = "private"
client = get_client("zephyr_mirror")
mirror_sender = create_mirrored_message_users(
client, user, recipients, sender.email, recipient_type_name
)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn(user.email, realm_emails)
self.assertIn(new_user_email, realm_emails)
bob = get_user(new_user_email, new_user_realm)
self.assertTrue(bob.is_mirror_dummy)
@mock.patch(
"DNS.dnslookup",
return_value=[["sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh"]],
)
def test_zephyr_mirror_new_sender(self, ignored: object) -> None:
"""Test mirror dummy user creation for sender when sending to stream"""
user = self.mit_user("starnine")
sender_email = "new_sender@mit.edu"
recipients = ["stream_name"]
recipient_type_name = "stream"
client = get_client("zephyr_mirror")
mirror_sender = create_mirrored_message_users(
client, user, recipients, sender_email, recipient_type_name
)
assert mirror_sender is not None
self.assertEqual(mirror_sender.email, sender_email)
self.assertTrue(mirror_sender.is_mirror_dummy)
def test_irc_mirror(self) -> None:
reset_email_visibility_to_everyone_in_zulip_realm()
user = self.example_user("hamlet")
sender = user
recipients = [
self.nonreg_email("alice"),
"bob@irc.zulip.com",
self.nonreg_email("cordelia"),
]
recipient_type_name = "private"
client = get_client("irc_mirror")
mirror_sender = create_mirrored_message_users(
client, user, recipients, sender.email, recipient_type_name
)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn(self.nonreg_email("alice"), realm_emails)
self.assertIn("bob@irc.zulip.com", realm_emails)
bob = get_user("bob@irc.zulip.com", sender.realm)
self.assertTrue(bob.is_mirror_dummy)
def test_jabber_mirror(self) -> None:
reset_email_visibility_to_everyone_in_zulip_realm()
user = self.example_user("hamlet")
sender = user
recipients = [
self.nonreg_email("alice"),
self.nonreg_email("bob"),
self.nonreg_email("cordelia"),
]
recipient_type_name = "private"
client = get_client("jabber_mirror")
mirror_sender = create_mirrored_message_users(
client, user, recipients, sender.email, recipient_type_name
)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn(self.nonreg_email("alice"), realm_emails)
self.assertIn(self.nonreg_email("bob"), realm_emails)
bob = get_user(self.nonreg_email("bob"), sender.realm)
self.assertTrue(bob.is_mirror_dummy)
def test_create_mirror_user_despite_race(self) -> None:
realm = get_realm("zulip")
email = "fred@example.com"
email_to_full_name = lambda email: "fred"
def create_user(**kwargs: Any) -> UserProfile:
self.assertEqual(kwargs["full_name"], "fred")
self.assertEqual(kwargs["email"], email)
self.assertEqual(kwargs["active"], False)
self.assertEqual(kwargs["is_mirror_dummy"], True)
# We create an actual user here to simulate a race.
# We use the minimal, un-mocked function.
kwargs["bot_type"] = None
kwargs["bot_owner"] = None
kwargs["tos_version"] = None
kwargs["timezone"] = timezone_now()
kwargs["email_address_visibility"] = UserProfile.EMAIL_ADDRESS_VISIBILITY_EVERYONE
create_user_profile(**kwargs).save()
raise IntegrityError
with mock.patch("zerver.actions.message_send.create_user", side_effect=create_user) as m:
mirror_fred_user = create_mirror_user_if_needed(
realm,
email,
email_to_full_name,
)
self.assertEqual(mirror_fred_user.delivery_email, email)
m.assert_called()
|
5f32820a75d36ddd571bea5fc740307462eaad9c
|
f52b8606074c2e0cc0c60b30b51c015fd932b0a2
|
/virl/cli/ls/commands.py
|
72ec103f9935cd5b3ad919388aba309f1ee26d6f
|
[
"MIT"
] |
permissive
|
CiscoDevNet/virlutils
|
81af101bcca6a28fd584ab4b8f3e1aed5a6345c3
|
84afb01a6366d54febb9481c364a378f891327f4
|
refs/heads/master
| 2023-04-30T12:00:05.233334
| 2023-03-06T14:46:48
| 2023-03-06T14:46:48
| 114,168,527
| 144
| 47
|
MIT
| 2023-04-25T18:32:39
| 2017-12-13T21:02:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,004
|
py
|
commands.py
|
import os
import click
from virl.api import VIRLServer, CachedLab, ViewerPlugin, NoPluginError
from virl.cli.views import sim_list_table, lab_list_table
from virl.helpers import find_virl, get_cml_client, get_cache_root
@click.command()
@click.option(
"--all/--server",
default=False,
show_default=False,
required=False,
help="Display cached labs in addition to those on the server (default: server labs only)",
)
@click.option(
"--all-users/--only-me",
default=False,
show_default=False,
required=False,
help="Display labs for all users (only if current user is an admin) (default: only show labs owned by me)",
)
def ls(all, all_users):
"""
lists running labs and optionally those in the cache
"""
server = VIRLServer()
client = get_cml_client(server)
labs = []
cached_labs = None
lab_ids = client.get_lab_list(all_users)
for id in lab_ids:
labs.append(client.join_existing_lab(id))
if all:
cached_labs = []
cache_root = get_cache_root()
if os.path.isdir(cache_root):
for f in os.listdir(cache_root):
lab_id = f
cached_labs.append(CachedLab(lab_id, cache_root + "/" + f))
try:
pl = ViewerPlugin(viewer="lab")
pl.visualize(labs=labs, cached_labs=cached_labs)
except NoPluginError:
lab_list_table(labs, cached_labs)
@click.command()
@click.option(
"--all/--local",
default=False,
help=" \
Display all simulations or only ones from the current project (default)",
)
def ls1(all, **kwargs):
"""
lists running simulations in the current project
"""
server = VIRLServer()
sim_dict = server.list_simulations()
if not all:
# only sims for this project
dirpath = find_virl()
foldername = os.path.basename(dirpath)
for k in list(sim_dict):
if not k.startswith(foldername):
sim_dict.pop(k)
sim_list_table(sim_dict)
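# Example invocation from a test harness (added for illustration, not part of
# the original module), using click's built-in CliRunner; a reachable, already
# configured CML/VIRL server is assumed:
#
#   from click.testing import CliRunner
#   result = CliRunner().invoke(ls, ["--all"])  # server labs plus cached labs
#   print(result.output)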
|
d14cfa2ae58586c74f071ea38c83d4977eadf8a4
|
a61bf859ceeb1ba98de3863225e07b29e1d7ce8a
|
/thonny/plugins/micropython/base_api_stubs/statistics.pyi
|
8bf56d43c82d553a509c5897adcb361e0649fe23
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
thonny/thonny
|
3974b1860703e8450b837863682117f525a886c6
|
8fc9f5c7cbbe1d1c82aa5503ec4b684e28aa608c
|
refs/heads/master
| 2023-08-31T03:04:34.685140
| 2023-08-24T11:38:36
| 2023-08-24T11:38:36
| 163,728,962
| 2,788
| 1,048
|
MIT
| 2023-08-10T18:59:37
| 2019-01-01T10:29:50
|
Python
|
UTF-8
|
Python
| false
| false
| 63
|
pyi
|
statistics.pyi
|
def kaka() -> str:
"""
blahhaa
:return: sss
"""
|
055d33855786fec82320a8d4dc22d85a92874257
|
1a7d9d2f7fff1e965691ca08ac2e556599a5c8f7
|
/main.py
|
cead17cf680613a654899adb2bc5c261e01f478c
|
[
"MIT"
] |
permissive
|
Michaelvll/DeepCCA
|
aac3b6930113103c3465c553503874939ebbf3f7
|
63186ba00932337e1181db653c4304395372d334
|
refs/heads/master
| 2023-09-01T15:13:32.496792
| 2023-08-24T17:16:15
| 2023-08-24T17:16:15
| 151,364,352
| 259
| 70
|
NOASSERTION
| 2023-08-24T17:16:17
| 2018-10-03T05:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 9,233
|
py
|
main.py
|
import torch
import torch.nn as nn
import numpy as np
from linear_cca import linear_cca
from torch.utils.data import BatchSampler, SequentialSampler, RandomSampler
from DeepCCAModels import DeepCCA
from utils import load_data, svm_classify
import time
import logging
try:
import cPickle as thepickle
except ImportError:
import _pickle as thepickle
import gzip
torch.set_default_tensor_type(torch.DoubleTensor)
class Solver():
def __init__(self, model, linear_cca, outdim_size, epoch_num, batch_size, learning_rate, reg_par, device=torch.device('cpu')):
self.model = nn.DataParallel(model)
self.model.to(device)
self.epoch_num = epoch_num
self.batch_size = batch_size
self.loss = model.loss
self.optimizer = torch.optim.RMSprop(
self.model.parameters(), lr=learning_rate, weight_decay=reg_par)
self.device = device
self.linear_cca = linear_cca
self.outdim_size = outdim_size
formatter = logging.Formatter(
"[ %(levelname)s : %(asctime)s ] - %(message)s")
logging.basicConfig(
level=logging.DEBUG, format="[ %(levelname)s : %(asctime)s ] - %(message)s")
self.logger = logging.getLogger("Pytorch")
fh = logging.FileHandler("DCCA.log")
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.info(self.model)
self.logger.info(self.optimizer)
def fit(self, x1, x2, vx1=None, vx2=None, tx1=None, tx2=None, checkpoint='checkpoint.model'):
"""
x1, x2 are the vectors that need to be made correlated
dim=[batch_size, feats]
"""
x1.to(self.device)
x2.to(self.device)
data_size = x1.size(0)
if vx1 is not None and vx2 is not None:
best_val_loss = 0
vx1.to(self.device)
vx2.to(self.device)
if tx1 is not None and tx2 is not None:
tx1.to(self.device)
tx2.to(self.device)
train_losses = []
for epoch in range(self.epoch_num):
epoch_start_time = time.time()
self.model.train()
batch_idxs = list(BatchSampler(RandomSampler(
range(data_size)), batch_size=self.batch_size, drop_last=False))
for batch_idx in batch_idxs:
self.optimizer.zero_grad()
batch_x1 = x1[batch_idx, :]
batch_x2 = x2[batch_idx, :]
o1, o2 = self.model(batch_x1, batch_x2)
loss = self.loss(o1, o2)
train_losses.append(loss.item())
loss.backward()
self.optimizer.step()
train_loss = np.mean(train_losses)
info_string = "Epoch {:d}/{:d} - time: {:.2f} - training_loss: {:.4f}"
if vx1 is not None and vx2 is not None:
with torch.no_grad():
self.model.eval()
val_loss = self.test(vx1, vx2)
info_string += " - val_loss: {:.4f}".format(val_loss)
if val_loss < best_val_loss:
self.logger.info(
"Epoch {:d}: val_loss improved from {:.4f} to {:.4f}, saving model to {}".format(epoch + 1, best_val_loss, val_loss, checkpoint))
best_val_loss = val_loss
torch.save(self.model.state_dict(), checkpoint)
else:
self.logger.info("Epoch {:d}: val_loss did not improve from {:.4f}".format(
epoch + 1, best_val_loss))
else:
torch.save(self.model.state_dict(), checkpoint)
epoch_time = time.time() - epoch_start_time
self.logger.info(info_string.format(
epoch + 1, self.epoch_num, epoch_time, train_loss))
# train_linear_cca
if self.linear_cca is not None:
_, outputs = self._get_outputs(x1, x2)
self.train_linear_cca(outputs[0], outputs[1])
checkpoint_ = torch.load(checkpoint)
self.model.load_state_dict(checkpoint_)
if vx1 is not None and vx2 is not None:
loss = self.test(vx1, vx2)
self.logger.info("loss on validation data: {:.4f}".format(loss))
if tx1 is not None and tx2 is not None:
loss = self.test(tx1, tx2)
self.logger.info('loss on test data: {:.4f}'.format(loss))
def test(self, x1, x2, use_linear_cca=False):
with torch.no_grad():
losses, outputs = self._get_outputs(x1, x2)
if use_linear_cca:
print("Linear CCA started!")
outputs = self.linear_cca.test(outputs[0], outputs[1])
return np.mean(losses), outputs
else:
return np.mean(losses)
def train_linear_cca(self, x1, x2):
self.linear_cca.fit(x1, x2, self.outdim_size)
def _get_outputs(self, x1, x2):
with torch.no_grad():
self.model.eval()
data_size = x1.size(0)
batch_idxs = list(BatchSampler(SequentialSampler(
range(data_size)), batch_size=self.batch_size, drop_last=False))
losses = []
outputs1 = []
outputs2 = []
for batch_idx in batch_idxs:
batch_x1 = x1[batch_idx, :]
batch_x2 = x2[batch_idx, :]
o1, o2 = self.model(batch_x1, batch_x2)
outputs1.append(o1)
outputs2.append(o2)
loss = self.loss(o1, o2)
losses.append(loss.item())
outputs = [torch.cat(outputs1, dim=0).cpu().numpy(),
torch.cat(outputs2, dim=0).cpu().numpy()]
return losses, outputs
if __name__ == '__main__':
############
# Parameters Section
device = torch.device('cuda')
print("Using", torch.cuda.device_count(), "GPUs")
# the path to save the final learned features
save_to = './new_features.gz'
# the size of the new space learned by the model (number of the new features)
outdim_size = 10
# size of the input for view 1 and view 2
input_shape1 = 784
input_shape2 = 784
# number of layers with nodes in each one
layer_sizes1 = [1024, 1024, 1024, outdim_size]
layer_sizes2 = [1024, 1024, 1024, outdim_size]
# the parameters for training the network
learning_rate = 1e-3
epoch_num = 1
batch_size = 800
# the regularization parameter of the network
# seems necessary to avoid the gradient exploding especially when non-saturating activations are used
reg_par = 1e-5
# specifies if all the singular values should get used to calculate the correlation or just the top outdim_size ones
# if one option does not work for a network or dataset, try the other one
use_all_singular_values = False
# if a linear CCA should get applied on the learned features extracted from the networks
# it does not affect the performance on noisy MNIST significantly
apply_linear_cca = True
# end of parameters section
############
# Each view is stored in a gzip file separately. They will get downloaded the first time the code gets executed.
# Datasets get stored under the datasets folder of user's Keras folder
# normally under [Home Folder]/.keras/datasets/
data1 = load_data('./noisymnist_view1.gz')
data2 = load_data('./noisymnist_view2.gz')
# Building, training, and producing the new features by DCCA
model = DeepCCA(layer_sizes1, layer_sizes2, input_shape1,
input_shape2, outdim_size, use_all_singular_values, device=device).double()
l_cca = None
if apply_linear_cca:
l_cca = linear_cca()
solver = Solver(model, l_cca, outdim_size, epoch_num, batch_size,
learning_rate, reg_par, device=device)
train1, train2 = data1[0][0], data2[0][0]
val1, val2 = data1[1][0], data2[1][0]
test1, test2 = data1[2][0], data2[2][0]
solver.fit(train1, train2, val1, val2, test1, test2)
# TODO: Save l_cca model if needed
set_size = [0, train1.size(0), train1.size(
0) + val1.size(0), train1.size(0) + val1.size(0) + test1.size(0)]
loss, outputs = solver.test(torch.cat([train1, val1, test1], dim=0), torch.cat(
[train2, val2, test2], dim=0), apply_linear_cca)
new_data = []
# print(outputs)
for idx in range(3):
new_data.append([outputs[0][set_size[idx]:set_size[idx + 1], :],
outputs[1][set_size[idx]:set_size[idx + 1], :], data1[idx][1]])
# Training and testing of SVM with linear kernel on the view 1 with new features
[test_acc, valid_acc] = svm_classify(new_data, C=0.01)
print("Accuracy on view 1 (validation data) is:", valid_acc * 100.0)
print("Accuracy on view 1 (test data) is:", test_acc*100.0)
# Saving new features in a gzip pickled file specified by save_to
print('saving new features ...')
f1 = gzip.open(save_to, 'wb')
thepickle.dump(new_data, f1)
f1.close()
d = torch.load('checkpoint.model')
solver.model.load_state_dict(d)
solver.model.parameters()
|
0a65f8bf2b55c887daae37670ebdd8d958e60c7e
|
9587c0df0e5b36e2a9cdad9a2666885efe812620
|
/test/test_replication.py
|
593cd0abc003765bbee7cb0731587b7114eba40c
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
osmcode/pyosmium
|
c4d1b743824ea3bda8f3248d3e39a60b22c2940e
|
0459fb4126f51d215daca03620286c191676c0a1
|
refs/heads/master
| 2023-07-20T13:17:04.726937
| 2023-07-15T18:27:03
| 2023-07-15T18:27:03
| 24,344,280
| 277
| 63
|
BSD-2-Clause
| 2023-05-24T21:26:17
| 2014-09-22T20:25:42
|
Python
|
UTF-8
|
Python
| false
| false
| 10,975
|
py
|
test_replication.py
|
# SPDX-License-Identifier: BSD
#
# This file is part of Pyosmium.
#
# Copyright (C) 2022 Sarah Hoffmann.
from io import BytesIO
from textwrap import dedent
from urllib.error import URLError
import pytest
import requests.exceptions
from helpers import mkdate, CountingHandler
import osmium as o
import osmium.replication.server as rserv
import osmium.replication
class RequestsResponses(BytesIO):
def __init__(self, bytes):
super(RequestsResponses, self).__init__(bytes)
self.content = bytes
def iter_lines(self):
return self.readlines()
@pytest.mark.parametrize("inp,outp", [
(None, 'https://text.org/state.txt'),
(1, 'https://text.org/000/000/001.state.txt'),
(999, 'https://text.org/000/000/999.state.txt'),
(1000, 'https://text.org/000/001/000.state.txt'),
(573923, 'https://text.org/000/573/923.state.txt'),
(3290012, 'https://text.org/003/290/012.state.txt'),
])
def test_get_state_url(inp, outp):
svr = rserv.ReplicationServer("https://text.org")
assert outp == svr.get_state_url(inp)
@pytest.mark.parametrize("inp,outp", [
(1, 'https://who.is/me//000/000/001.osc.gz'),
(500, 'https://who.is/me//000/000/500.osc.gz'),
(83750, 'https://who.is/me//000/083/750.osc.gz'),
(999999999, 'https://who.is/me//999/999/999.osc.gz'),
])
def test_get_diff_url(inp, outp):
svr = rserv.ReplicationServer("https://who.is/me/")
assert outp == svr.get_diff_url(inp)
def test_get_newest_change_from_file(tmp_path):
fn = tmp_path / 'change.opl'
fn.write_text('n6365 v1 c63965061 t2018-10-29T03:56:07Z i8369524 ux x1 y7')
val = osmium.replication.newest_change_from_file(str(fn))
assert val == mkdate(2018, 10, 29, 3, 56, 7)
class TestReplication:
@pytest.fixture(params=["requests", "urllib"], autouse=True)
def setup_mocks(self, request, monkeypatch):
self.url_requests = []
self.url_exception = None
if request.param == "requests":
# Default use of the requests library. Simply patch the get method.
def mock_get(*args, **kwargs):
if self.url_exception is not None:
raise self.url_exception
assert self.url_requests
return RequestsResponses(self.url_requests.pop(0))
monkeypatch.setattr(osmium.replication.server.requests.Session, "get", mock_get)
def mk_server(*args, **kwargs):
return rserv.ReplicationServer(*args, **kwargs)
self.mk_replication_server = mk_server
elif request.param == "urllib":
def mock_get(*args, **kwargs):
if self.url_exception is not None:
raise self.url_exception
assert self.url_requests
return BytesIO(self.url_requests.pop(0))
def mk_server(*args, **kwargs):
server = rserv.ReplicationServer(*args, **kwargs)
server.open_url = mock_get
return server
self.mk_replication_server = mk_server
def set_result(self, content):
self.url_requests = [dedent(content).encode()]
def set_script(self, files):
self.url_requests = [dedent(s).encode() for s in files]
def test_get_state_valid(self):
self.set_result("""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=2017-08-26T11\\:04\\:02Z
txnReadyList=
txnMax=1219304113
txnActiveList=1219303583,1219304054,1219304104""")
res = self.mk_replication_server("https://test.io").get_state_info()
assert res is not None
assert res.timestamp == mkdate(2017, 8, 26, 11, 4, 2)
assert res.sequence == 2594669
def test_get_state_sequence_cut(self):
self.set_script(("""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=259""",
"""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=2017-08-26T11\\:04\\:02Z"""))
res = self.mk_replication_server("https://test.io").get_state_info()
assert res is not None
assert res.timestamp == mkdate(2017, 8, 26, 11, 4, 2)
assert res.sequence == 2594669
def test_get_state_date_cut(self):
self.set_script(("""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=2017-08-2""",
"""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=2017-08-26T11\\:04\\:02Z"""))
res = self.mk_replication_server("https://test.io").get_state_info()
assert res is not None
assert res.timestamp == mkdate(2017, 8, 26, 11, 4, 2)
assert res.sequence == 2594669
def test_get_state_timestamp_cut(self):
self.set_script(("""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=""",
"""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=2017-08-26T11\\:04\\:02Z"""))
res = self.mk_replication_server("https://test.io").get_state_info()
assert res is not None
assert res.timestamp == mkdate(2017, 8, 26, 11, 4, 2)
assert res.sequence == 2594669
def test_get_state_too_many_retries(self):
self.set_script(("""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=""",
"""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=""",
"""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=""",
"""\
#Sat Aug 26 11:04:04 UTC 2017
txnMaxQueried=1219304113
sequenceNumber=2594669
timestamp=2017-08-26T11\\:04\\:02Z"""))
res = self.mk_replication_server("https://test.io").get_state_info()
assert res is None
@pytest.mark.parametrize("error", [URLError(reason='Mock'),
requests.exceptions.ConnectTimeout])
def test_get_state_server_timeout(self, error):
self.url_exception = error
svr = self.mk_replication_server("https://test.io")
assert svr.get_state_info() is None
def test_apply_diffs_count(self):
self.set_script(("""\
sequenceNumber=100
timestamp=2017-08-26T11\\:04\\:02Z
""", """
n1
w1
r1
"""))
svr = self.mk_replication_server("https://test.io", "opl")
h = CountingHandler()
assert 100 == svr.apply_diffs(h, 100, 10000)
assert h.counts == [1, 1, 1, 0]
def test_apply_diffs_without_simplify(self):
self.set_script(("""\
sequenceNumber=100
timestamp=2017-08-26T11\\:04\\:02Z
""", """
n1 v23
n1 v24
w1
r1
"""))
svr = self.mk_replication_server("https://test.io", "opl")
h = CountingHandler()
assert 100 == svr.apply_diffs(h, 100, 10000, simplify=False)
assert [2, 1, 1, 0] == h.counts
def test_apply_diffs_with_simplify(self):
self.set_script(("""\
sequenceNumber=100
timestamp=2017-08-26T11\\:04\\:02Z
""", """
n1 v23
n1 v24
w1
r1
"""))
svr = self.mk_replication_server("https://test.io", "opl")
h = CountingHandler()
assert 100 == svr.apply_diffs(h, 100, 10000, simplify=True)
assert [1, 1, 1, 0] == h.counts
def test_apply_with_location(self):
self.set_script(("""\
sequenceNumber=100
timestamp=2017-08-26T11\\:04\\:02Z
""", """
n1 x10.0 y23.0
w1 Nn1,n2
"""))
svr = self.mk_replication_server("https://test.io", "opl")
class Handler(CountingHandler):
def way(self, w):
self.counts[1] += 1
assert 2 == len(w.nodes)
assert 1 == w.nodes[0].ref
assert 10 == w.nodes[0].location.lon
assert 23 == w.nodes[0].location.lat
assert 2 == w.nodes[1].ref
assert not w.nodes[1].location.valid()
h = Handler()
assert 100 == svr.apply_diffs(h, 100, 10000, idx="flex_mem")
assert h.counts == [1, 1, 0, 0]
def test_apply_reader_without_simplify(self):
self.set_script(("""\
sequenceNumber=100
timestamp=2017-08-26T11\\:04\\:02Z
""", """
n1 v23
n1 v24
w1
r1
"""))
svr = self.mk_replication_server("https://test.io", "opl")
h = CountingHandler()
diffs = svr.collect_diffs(100, 100000)
assert diffs is not None
diffs.reader.apply(h, simplify=False)
assert [2, 1, 1, 0] == h.counts
def test_apply_reader_with_simplify(self):
self.set_script(("""\
sequenceNumber=100
timestamp=2017-08-26T11\\:04\\:02Z
""", """
n1 v23
n1 v24
w1
r1
"""))
svr = self.mk_replication_server("https://test.io", "opl")
h = CountingHandler()
diffs = svr.collect_diffs(100, 100000)
assert diffs is not None
diffs.reader.apply(h, simplify=True)
assert [1, 1, 1, 0] == h.counts
def test_apply_reader_with_location(self):
self.set_script(("""\
sequenceNumber=100
timestamp=2017-08-26T11\\:04\\:02Z
""", """
n1 x10.0 y23.0
w1 Nn1,n2
"""))
svr = self.mk_replication_server("https://test.io", "opl")
class Handler(CountingHandler):
def way(self, w):
self.counts[1] += 1
assert 2 == len(w.nodes)
assert 1 == w.nodes[0].ref
assert 10 == w.nodes[0].location.lon
assert 23 == w.nodes[0].location.lat
assert 2 == w.nodes[1].ref
assert not w.nodes[1].location.valid()
h = Handler()
diffs = svr.collect_diffs(100, 100000)
assert diffs is not None
diffs.reader.apply(h, idx="flex_mem")
assert h.counts == [1, 1, 0, 0]
|
0e3654e7c69e8ee843e2b8d2d673cdcbe9d3849e
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/ShapeOptimizationApplication/tests/opt_process_step_adaption_test/run_test.py
|
3eac5b6c6b8d31a7f4f33f61c16d064197a95329
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 2,166
|
py
|
run_test.py
|
# Import Kratos core and apps
import KratosMultiphysics as KM
# Additional imports
from KratosMultiphysics.ShapeOptimizationApplication import optimizer_factory
from KratosMultiphysics.KratosUnittest import TestCase
import KratosMultiphysics.kratos_utilities as kratos_utilities
import csv, os
# Read parameters
with open("parameters.json",'r') as parameter_file:
parameters = KM.Parameters(parameter_file.read())
model = KM.Model()
# Create optimizer and perform optimization
optimizer = optimizer_factory.Create(model, parameters["optimization_settings"])
optimizer.Optimize()
# =======================================================================================================
# Test results and clean directory
# =======================================================================================================
output_directory = parameters["optimization_settings"]["output"]["output_directory"].GetString()
optimization_log_filename = parameters["optimization_settings"]["output"]["optimization_log_filename"].GetString() + ".csv"
optimization_model_part_name = parameters["optimization_settings"]["model_settings"]["model_part_name"].GetString()
# Testing
original_directory = os.getcwd()
os.chdir(output_directory)
with open(optimization_log_filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
last_line = None
for line in reader:
if not line:
continue
else:
last_line = line
resulting_optimization_iterations = int(last_line[0].strip())
resulting_improvement = float(last_line[2].strip())
resulting_gradient_norm = float(last_line[4].strip())
resulting_step_size = float(last_line[5].strip())
# Check against specifications
TestCase().assertEqual(resulting_optimization_iterations, 10)
TestCase().assertAlmostEqual(resulting_improvement, -8.79655E+00, 4)
TestCase().assertAlmostEqual(resulting_gradient_norm, 4.48563E+03, 4)
TestCase().assertAlmostEqual(resulting_step_size, 2.35795E-01, 4)
os.chdir(original_directory)
# =======================================================================================================
|
b7538e66613a1daef4e8cc87bcf5514dfe05798a
|
5e0f64162d53639183c8fcbe9b2ae13df699b4f6
|
/textattack/constraints/grammaticality/part_of_speech.py
|
f531f33c7c7667ac412ba737978bce2c7369eba3
|
[
"MIT"
] |
permissive
|
QData/TextAttack
|
b0295d995609c9844164a0d76b90405b74b56e3c
|
00adb8a55580f6dea5fd6952e93f095829e807dd
|
refs/heads/master
| 2023-08-27T15:01:42.367433
| 2023-07-24T16:06:19
| 2023-07-24T16:06:19
| 215,173,055
| 2,581
| 377
|
MIT
| 2023-09-11T16:57:00
| 2019-10-15T00:51:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,858
|
py
|
part_of_speech.py
|
"""
Part of Speech Constraint
--------------------------
"""
import flair
from flair.data import Sentence
from flair.models import SequenceTagger
import lru
import nltk
import textattack
from textattack.constraints import Constraint
from textattack.shared.utils import LazyLoader, device
from textattack.shared.validators import transformation_consists_of_word_swaps
# Set global flair device to be TextAttack's current device
flair.device = device
stanza = LazyLoader("stanza", globals(), "stanza")
class PartOfSpeech(Constraint):
"""Constraints word swaps to only swap words with the same part of speech.
Uses the NLTK universal part-of-speech tagger by default. An implementation
of `<https://arxiv.org/abs/1907.11932>`_ adapted from
`<https://github.com/jind11/TextFooler>`_.
POS taggers from Flair `<https://github.com/flairNLP/flair>`_ and
Stanza `<https://github.com/stanfordnlp/stanza>`_ are also available.
Args:
tagger_type (str): Name of the tagger to use (available choices: "nltk", "flair", "stanza").
tagset (str): tagset to use for POS tagging (e.g. "universal")
allow_verb_noun_swap (bool): If `True`, allow verbs to be swapped with nouns and vice versa.
compare_against_original (bool): If `True`, compare against the original text.
Otherwise, compare against the most recent text.
language_nltk: Language to be used for nltk POS-Tagger
(available choices: "eng", "rus")
language_stanza: Language to be used for stanza POS-Tagger
(available choices: https://stanfordnlp.github.io/stanza/available_models.html)
"""
def __init__(
self,
tagger_type="nltk",
tagset="universal",
allow_verb_noun_swap=True,
compare_against_original=True,
language_nltk="eng",
language_stanza="en",
):
super().__init__(compare_against_original)
self.tagger_type = tagger_type
self.tagset = tagset
self.allow_verb_noun_swap = allow_verb_noun_swap
self.language_nltk = language_nltk
self.language_stanza = language_stanza
self._pos_tag_cache = lru.LRU(2**14)
if tagger_type == "flair":
if tagset == "universal":
self._flair_pos_tagger = SequenceTagger.load("upos-fast")
else:
self._flair_pos_tagger = SequenceTagger.load("pos-fast")
if tagger_type == "stanza":
self._stanza_pos_tagger = stanza.Pipeline(
lang=self.language_stanza,
processors="tokenize, pos",
tokenize_pretokenized=True,
)
def clear_cache(self):
self._pos_tag_cache.clear()
def _can_replace_pos(self, pos_a, pos_b):
return (pos_a == pos_b) or (
self.allow_verb_noun_swap and set([pos_a, pos_b]) <= set(["NOUN", "VERB"])
)
def _get_pos(self, before_ctx, word, after_ctx):
context_words = before_ctx + [word] + after_ctx
context_key = " ".join(context_words)
if context_key in self._pos_tag_cache:
word_list, pos_list = self._pos_tag_cache[context_key]
else:
if self.tagger_type == "nltk":
word_list, pos_list = zip(
*nltk.pos_tag(
context_words, tagset=self.tagset, lang=self.language_nltk
)
)
if self.tagger_type == "flair":
context_key_sentence = Sentence(
context_key,
use_tokenizer=textattack.shared.utils.TextAttackFlairTokenizer(),
)
self._flair_pos_tagger.predict(context_key_sentence)
word_list, pos_list = textattack.shared.utils.zip_flair_result(
context_key_sentence
)
if self.tagger_type == "stanza":
word_list, pos_list = textattack.shared.utils.zip_stanza_result(
self._stanza_pos_tagger(context_key), tagset=self.tagset
)
self._pos_tag_cache[context_key] = (word_list, pos_list)
# idx of `word` in `context_words`
assert word in word_list, "POS list not matched with original word list."
word_idx = word_list.index(word)
return pos_list[word_idx]
def _check_constraint(self, transformed_text, reference_text):
try:
indices = transformed_text.attack_attrs["newly_modified_indices"]
except KeyError:
raise KeyError(
"Cannot apply part-of-speech constraint without `newly_modified_indices`"
)
for i in indices:
reference_word = reference_text.words[i]
transformed_word = transformed_text.words[i]
before_ctx = reference_text.words[max(i - 4, 0) : i]
after_ctx = reference_text.words[
i + 1 : min(i + 4, len(reference_text.words))
]
ref_pos = self._get_pos(before_ctx, reference_word, after_ctx)
replace_pos = self._get_pos(before_ctx, transformed_word, after_ctx)
if not self._can_replace_pos(ref_pos, replace_pos):
return False
return True
def check_compatibility(self, transformation):
return transformation_consists_of_word_swaps(transformation)
def extra_repr_keys(self):
return [
"tagger_type",
"tagset",
"allow_verb_noun_swap",
] + super().extra_repr_keys()
def __getstate__(self):
state = self.__dict__.copy()
state["_pos_tag_cache"] = self._pos_tag_cache.get_size()
return state
def __setstate__(self, state):
self.__dict__ = state
self._pos_tag_cache = lru.LRU(state["_pos_tag_cache"])
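# Usage sketch (added for illustration, not from the original file): build the
# constraint with the default NLTK universal tagger and allow noun<->verb swaps.
# The instance would then be placed in an attack recipe's constraint list, where
# _check_constraint() compares the POS tag of every newly modified word.
#
#   pos_constraint = PartOfSpeech(tagger_type="nltk", tagset="universal",
#                                 allow_verb_noun_swap=True)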
|
2f5b0eeb4da6bd24b86be2a9c1e7f935f1ea86dc
|
45826bdfebbd1d7638ab607906ac480031d6118b
|
/lib/metrics/cityscapes/setup.py
|
abf3f313e25edd406e99dae2bdb8df6c35f301a0
|
[
"MIT"
] |
permissive
|
openseg-group/openseg.pytorch
|
b75cec5c95b6ff71707d8daf7806001bab89ecb3
|
aefc75517b09068d7131a69420bc5f66cb41f0ee
|
refs/heads/master
| 2023-09-06T10:19:57.749113
| 2022-08-07T09:10:20
| 2022-08-07T09:10:20
| 166,743,301
| 1,227
| 159
|
MIT
| 2021-07-14T06:10:44
| 2019-01-21T03:34:59
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
setup.py
|
#!/usr/bin/python
#
# Enable cython support for eval metrics
# Run as
# setup.py build_ext --inplace
#
# WARNING: Only tested for Ubuntu 64bit OS.
try:
    from distutils.core import setup
    from Cython.Build import cythonize
except ImportError:
    print("Unable to setup. Please use pip to install: cython")
    print("sudo pip install cython")
    raise
import os
import numpy
os.environ["CC"] = "g++"
os.environ["CXX"] = "g++"
setup(ext_modules = cythonize("lib/metrics/cityscapes/evaluation/addToConfusionMatrix.pyx"),
include_dirs=[numpy.get_include()])
|
c15c18bce5290dd762967e507de033c85ec6a844
|
c3ca0bcea4d1b4013a0891f014928922fc81fe7a
|
/d3rlpy/algos/qlearning/torch/bear_impl.py
|
10b83d99b2782b918299bb69be3af0588a2ade24
|
[
"MIT"
] |
permissive
|
takuseno/d3rlpy
|
47894b17fc21fab570eca39fe8e6925a7b5d7d6f
|
4ba297fc6cd62201f7cd4edb7759138182e4ce04
|
refs/heads/master
| 2023-08-23T12:27:45.305758
| 2023-08-14T12:07:03
| 2023-08-14T12:07:03
| 266,369,147
| 1,048
| 222
|
MIT
| 2023-09-02T08:12:48
| 2020-05-23T15:51:51
|
Python
|
UTF-8
|
Python
| false
| false
| 8,509
|
py
|
bear_impl.py
|
import dataclasses
from typing import Dict
import torch
from torch.optim import Optimizer
from ....dataset import Shape
from ....models.torch import (
ConditionalVAE,
ContinuousEnsembleQFunctionForwarder,
Parameter,
build_squashed_gaussian_distribution,
compute_max_with_n_actions_and_indices,
compute_vae_error,
forward_vae_sample_n,
)
from ....torch_utility import TorchMiniBatch, train_api
from .sac_impl import SACImpl, SACModules
__all__ = ["BEARImpl", "BEARModules"]
def _gaussian_kernel(
x: torch.Tensor, y: torch.Tensor, sigma: float
) -> torch.Tensor:
# x: (batch, n, 1, action), y: (batch, 1, n, action) -> (batch, n, n)
return (-((x - y) ** 2).sum(dim=3) / (2 * sigma)).exp()
def _laplacian_kernel(
x: torch.Tensor, y: torch.Tensor, sigma: float
) -> torch.Tensor:
# x: (batch, n, 1, action), y: (batch, 1, n, action) -> (batch, n, n)
return (-(x - y).abs().sum(dim=3) / (2 * sigma)).exp()
@dataclasses.dataclass(frozen=True)
class BEARModules(SACModules):
imitator: ConditionalVAE
log_alpha: Parameter
imitator_optim: Optimizer
alpha_optim: Optimizer
class BEARImpl(SACImpl):
_modules: BEARModules
_alpha_threshold: float
_lam: float
_n_action_samples: int
_n_target_samples: int
_n_mmd_action_samples: int
_mmd_kernel: str
_mmd_sigma: float
_vae_kl_weight: float
def __init__(
self,
observation_shape: Shape,
action_size: int,
modules: BEARModules,
q_func_forwarder: ContinuousEnsembleQFunctionForwarder,
targ_q_func_forwarder: ContinuousEnsembleQFunctionForwarder,
gamma: float,
tau: float,
alpha_threshold: float,
lam: float,
n_action_samples: int,
n_target_samples: int,
n_mmd_action_samples: int,
mmd_kernel: str,
mmd_sigma: float,
vae_kl_weight: float,
device: str,
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
modules=modules,
q_func_forwarder=q_func_forwarder,
targ_q_func_forwarder=targ_q_func_forwarder,
gamma=gamma,
tau=tau,
device=device,
)
self._alpha_threshold = alpha_threshold
self._lam = lam
self._n_action_samples = n_action_samples
self._n_target_samples = n_target_samples
self._n_mmd_action_samples = n_mmd_action_samples
self._mmd_kernel = mmd_kernel
self._mmd_sigma = mmd_sigma
self._vae_kl_weight = vae_kl_weight
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
loss = super().compute_actor_loss(batch)
mmd_loss = self._compute_mmd_loss(batch.observations)
return loss + mmd_loss
@train_api
def warmup_actor(self, batch: TorchMiniBatch) -> Dict[str, float]:
self._modules.actor_optim.zero_grad()
loss = self._compute_mmd_loss(batch.observations)
loss.backward()
self._modules.actor_optim.step()
return {"actor_loss": float(loss.cpu().detach().numpy())}
def _compute_mmd_loss(self, obs_t: torch.Tensor) -> torch.Tensor:
mmd = self._compute_mmd(obs_t)
alpha = self._modules.log_alpha().exp()
return (alpha * (mmd - self._alpha_threshold)).mean()
@train_api
def update_imitator(self, batch: TorchMiniBatch) -> Dict[str, float]:
self._modules.imitator_optim.zero_grad()
loss = self.compute_imitator_loss(batch)
loss.backward()
self._modules.imitator_optim.step()
return {"imitator_loss": float(loss.cpu().detach().numpy())}
def compute_imitator_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
return compute_vae_error(
vae=self._modules.imitator,
x=batch.observations,
action=batch.actions,
beta=self._vae_kl_weight,
)
@train_api
def update_alpha(self, batch: TorchMiniBatch) -> Dict[str, float]:
loss = -self._compute_mmd_loss(batch.observations)
self._modules.alpha_optim.zero_grad()
loss.backward()
self._modules.alpha_optim.step()
# clip for stability
self._modules.log_alpha.data.clamp_(-5.0, 10.0)
cur_alpha = self._modules.log_alpha().exp().cpu().detach().numpy()[0][0]
return {
"alpha_loss": float(loss.cpu().detach().numpy()),
"alpha": float(cur_alpha),
}
def _compute_mmd(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
behavior_actions = forward_vae_sample_n(
self._modules.imitator,
x,
self._n_mmd_action_samples,
with_squash=False,
)
dist = build_squashed_gaussian_distribution(self._modules.policy(x))
policy_actions = dist.sample_n_without_squash(
self._n_mmd_action_samples
)
if self._mmd_kernel == "gaussian":
kernel = _gaussian_kernel
elif self._mmd_kernel == "laplacian":
kernel = _laplacian_kernel
else:
raise ValueError(f"Invalid kernel type: {self._mmd_kernel}")
# (batch, n, action) -> (batch, n, 1, action)
behavior_actions = behavior_actions.reshape(
x.shape[0], -1, 1, self.action_size
)
policy_actions = policy_actions.reshape(
x.shape[0], -1, 1, self.action_size
)
# (batch, n, action) -> (batch, 1, n, action)
behavior_actions_T = behavior_actions.reshape(
x.shape[0], 1, -1, self.action_size
)
policy_actions_T = policy_actions.reshape(
x.shape[0], 1, -1, self.action_size
)
# 1 / N^2 \sum k(a_\pi, a_\pi)
inter_policy = kernel(policy_actions, policy_actions_T, self._mmd_sigma)
mmd = inter_policy.mean(dim=[1, 2])
# 1 / N^2 \sum k(a_\beta, a_\beta)
inter_data = kernel(
behavior_actions, behavior_actions_T, self._mmd_sigma
)
mmd += inter_data.mean(dim=[1, 2])
# 2 / N^2 \sum k(a_\pi, a_\beta)
distance = kernel(policy_actions, behavior_actions_T, self._mmd_sigma)
mmd -= 2 * distance.mean(dim=[1, 2])
return (mmd + 1e-6).sqrt().view(-1, 1)
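    # Added note (not in the original source): the three terms above form the
    # standard biased empirical MMD^2 estimator with N = _n_mmd_action_samples,
    #   MMD^2(pi, beta) ~= 1/N^2 sum_ij k(a_i^pi, a_j^pi)
    #                    + 1/N^2 sum_ij k(a_i^beta, a_j^beta)
    #                    - 2/N^2 sum_ij k(a_i^pi, a_j^beta),
    # and the 1e-6 term keeps sqrt() differentiable when the estimate is near zero.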
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
with torch.no_grad():
# BCQ-like target computation
dist = build_squashed_gaussian_distribution(
self._modules.policy(batch.next_observations)
)
actions, log_probs = dist.sample_n_with_log_prob(
self._n_target_samples
)
values, indices = compute_max_with_n_actions_and_indices(
batch.next_observations,
actions,
self._targ_q_func_forwarder,
self._lam,
)
# (batch, n, 1) -> (batch, 1)
batch_size = batch.observations.shape[0]
max_log_prob = log_probs[torch.arange(batch_size), indices]
return values - self._modules.log_temp().exp() * max_log_prob
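    # Added note (not in the original source): assuming
    # compute_max_with_n_actions_and_indices implements the usual BCQ-style
    # ensemble mix, the target computed above is
    #   y = max_{a_k ~ pi(.|s')} [ lam * min_j Q_j(s', a_k) + (1 - lam) * max_j Q_j(s', a_k) ]
    #       - temperature * log pi(a_k* | s'),
    # i.e. a pessimistic ensemble value minus the SAC-style entropy term for the chosen sample.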
def inner_predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
# (batch, n, action)
dist = build_squashed_gaussian_distribution(self._modules.policy(x))
actions = dist.onnx_safe_sample_n(self._n_action_samples)
# (batch, n, action) -> (batch * n, action)
flat_actions = actions.reshape(-1, self._action_size)
# (batch, observation) -> (batch, 1, observation)
expanded_x = x.view(x.shape[0], 1, *x.shape[1:])
# (batch, 1, observation) -> (batch, n, observation)
repeated_x = expanded_x.expand(
x.shape[0], self._n_action_samples, *x.shape[1:]
)
# (batch, n, observation) -> (batch * n, observation)
flat_x = repeated_x.reshape(-1, *x.shape[1:])
# (batch * n, 1)
flat_values = self._q_func_forwarder.compute_expected_q(
flat_x, flat_actions, "none"
)[0]
# (batch, n)
values = flat_values.view(x.shape[0], self._n_action_samples)
# (batch, n) -> (batch,)
max_indices = torch.argmax(values, dim=1)
return actions[torch.arange(x.shape[0]), max_indices]
|
373e1d210aa3046302186c5ac74156a92a7286cf
|
7a6aca7d300c0752f2a73730b743a1a7361e941b
|
/tensorflow_graphics/image/tests/transformer_test.py
|
09f0bd5a9d489162bfb1072e89cbcbe7546c4bd0
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/graphics
|
ef0abe102398a58eb7c41b709393df3d0b0a2811
|
1b0203eb538f2b6a1013ec7736d0d548416f059a
|
refs/heads/master
| 2023-09-03T20:41:25.992578
| 2023-08-08T21:16:36
| 2023-08-08T21:17:31
| 164,626,274
| 2,920
| 413
|
Apache-2.0
| 2023-08-27T14:26:47
| 2019-01-08T10:39:44
|
Python
|
UTF-8
|
Python
| false
| false
| 6,043
|
py
|
transformer_test.py
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image transformation functionalities."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from tensorflow_graphics.image import transformer
from tensorflow_graphics.util import test_case
class TransformerTest(test_case.TestCase, parameterized.TestCase):
@parameterized.parameters(
((None, 1, 2, None), (None, 3, 3)),
((1, 2, 3, 4), (1, 3, 3)),
)
def test_perspective_transform_exception_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(transformer.perspective_transform,
shape)
@parameterized.parameters(
("must have a rank of 4.", (1, 1, 1), (1, 3, 3)),
("must have a rank of 3.", (1, 1, 1, 1), (3, 3)),
("Not all batch dimensions are identical.", (1, 1, 1, 1), (2, 3, 3)),
)
def test_perspective_transform_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(transformer.perspective_transform,
error_msg, shape)
@parameterized.parameters(
(tf.float32, "NEAREST"),
(tf.float64, "NEAREST"),
(tf.float32, "BILINEAR"),
(tf.float64, "BILINEAR"),
)
def test_perspective_transform_half_integer_centers_preset(
self, dtype, interpolation):
"""Tests that we can reproduce the results of tf.image.resize."""
image = tf.constant(
((1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0), (10.0, 11.0, 12.0)),
dtype=dtype)
scale = 3
transformation = tf.constant(
((1.0 / scale, 0.0, 0.0), (0.0, 1.0 / scale, 0.0), (0.0, 0.0, 1.0)),
dtype=dtype)
image_shape = tf.shape(input=image)
image_resized_shape = image_shape * scale
image = image[tf.newaxis, ..., tf.newaxis]
transformation = transformation[tf.newaxis, ...]
image_resized = tf.image.resize(
image,
size=image_resized_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
if interpolation == "NEAREST" else tf.image.ResizeMethod.BILINEAR)
image_transformed = transformer.perspective_transform(
image,
transformation,
resampling_type=transformer.ResamplingType.NEAREST
if interpolation == "NEAREST" else transformer.ResamplingType.BILINEAR,
border_type=transformer.BorderType.DUPLICATE,
output_shape=image_resized_shape)
self.assertAllClose(image_resized, image_transformed)
@parameterized.parameters(
(tf.float32, "NEAREST"),
(tf.float64, "NEAREST"),
(tf.float32, "BILINEAR"),
(tf.float64, "BILINEAR"),
)
def test_perspective_transform_integer_centers_preset(self, dtype,
interpolation):
"""Tests that we can reproduce the results of tfa_image.transform."""
image = tf.constant(
((1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0), (10.0, 11.0, 12.0)),
dtype=dtype)
scale = 3
transformation = tf.constant(
((1.0 / scale, 0.0, 0.0), (0.0, 1.0 / scale, 0.0), (0.0, 0.0, 1.0)),
dtype=dtype)
image_shape = tf.shape(input=image)
image_resized_shape = image_shape * scale
image = image[tf.newaxis, ..., tf.newaxis]
transformation = transformation[tf.newaxis, ...]
image_resized = tfa_image.transform(
tf.cast(image, tf.float32),
tf.cast(
tfa_image.transform_ops.matrices_to_flat_transforms(transformation),
tf.float32),
interpolation=interpolation,
output_shape=image_resized_shape)
image_transformed = transformer.perspective_transform(
image,
transformation,
resampling_type=transformer.ResamplingType.NEAREST
if interpolation == "NEAREST" else transformer.ResamplingType.BILINEAR,
pixel_type=transformer.PixelType.INTEGER,
output_shape=image_resized_shape)
self.assertAllClose(image_resized, image_transformed)
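  # Note (comment added for clarity): the two presets above differ only in the
  # pixel-center convention; tf.image.resize is matched with the default
  # half-integer pixel centers, while tfa_image.transform is matched by passing
  # pixel_type=transformer.PixelType.INTEGER.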
def test_perspective_transform_jacobian_random(self):
"""Tests the Jacobian of the transform function."""
tensor_shape = np.random.randint(2, 4, size=4)
image_init = np.random.uniform(0.0, 1.0, size=tensor_shape.tolist())
transformation_init = np.random.uniform(
0.0, 1.0, size=(tensor_shape[0], 3, 3))
self.assert_jacobian_is_correct_fn(
lambda x: transformer.perspective_transform(x, transformation_init),
[image_init])
self.assert_jacobian_is_correct_fn(
lambda x: transformer.perspective_transform(image_init, x),
[transformation_init])
@parameterized.parameters(
((None, 1, 2, None), (None, 2)),
((1, 3, 2, 4), (1, 2)),
)
def test_sample_exception_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(transformer.sample, shape)
@parameterized.parameters(
("must have a rank of 4.", (1, 1, 1), (1, 2)),
("must have a rank greater than 1", (1, 1, 1, 1), (2,)),
("Not all batch dimensions are identical.", (1, 1, 1, 1), (2, 2)),
)
def test_sample_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(transformer.sample, error_msg, shape)
if __name__ == "__main__":
test_case.main()
|