Dataset columns (name: type, observed range):
blob_id: string, length 40
directory_id: string, length 40
path: string, length 4 to 721
content_id: string, length 40
detected_licenses: list, length 0 to 57
license_type: string, 2 classes
repo_name: string, length 5 to 91
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, 321 classes
visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
github_id: int64, 426 to 681M
star_events_count: int64, 101 to 243k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 classes
gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (contains nulls)
gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (contains nulls)
gha_language: string, 147 classes
src_encoding: string, 26 classes
language: string, 2 classes
is_vendor: bool
is_generated: bool
length_bytes: int64, 6 to 10.2M
extension: string, 115 classes
filename: string, length 3 to 113
content: string, length 6 to 10.2M

blob_id: 3b17bd810932ee635e24864482ef59430b9471e8
directory_id: 6f36df6219f8e50374068bb4b3e1a5387c7a2f34
path: /examples/convection/exponential1D/tri2D.py
content_id: 4b7ae111ec641368df45c736ad14e143140f295b
detected_licenses: ["NIST-PD"]
license_type: permissive
repo_name: usnistgov/fipy
snapshot_id: 0a3db715fea452ae710eea3999d9cd42dfe76fe7
revision_id: fdc17193bc293da7511be9021e6d4766757e1966
branch_name: refs/heads/master
visit_date: 2023-08-31T21:59:36.611448
revision_date: 2023-06-27T16:28:58
committer_date: 2023-06-27T16:28:58
github_id: 23,316,495
star_events_count: 444
fork_events_count: 171
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-06T19:21:19
gha_created_at: 2014-08-25T14:27:58
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,705
extension: py
filename: tri2D.py
content:
r"""
This example solves the steady-state convection-diffusion equation as described in
:mod:`examples.diffusion.convection.exponential1D.input` but uses a
:class:`~fipy.meshes.tri2D.Tri2D` mesh.
Here the axes are reversed (``nx = 1``, ``ny = 1000``) and
.. math::
\vec{u} = (0, 10)
>>> from fipy import CellVariable, Tri2D, DiffusionTerm, ExponentialConvectionTerm, DefaultAsymmetricSolver, Viewer
>>> from fipy.tools import numerix
>>> L = 10.
>>> nx = 1
>>> ny = 1000
>>> mesh = Tri2D(dx = L / ny, dy = L / ny, nx = nx, ny = ny)
>>> valueBottom = 0.
>>> valueTop = 1.
>>> var = CellVariable(name = "concentration",
... mesh = mesh,
... value = valueBottom)
>>> var.constrain(valueBottom, mesh.facesBottom)
>>> var.constrain(valueTop, mesh.facesTop)
>>> diffCoeff = 1.
>>> convCoeff = numerix.array(((0.,), (10.,)))
>>> eq = (DiffusionTerm(coeff=diffCoeff)
... + ExponentialConvectionTerm(coeff=convCoeff))
>>> eq.solve(var = var,
... solver=DefaultAsymmetricSolver(iterations=10000))
The analytical solution test for this problem is given by:
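In closed form (transcribing the doctest below, with :math:`u_y` the vertical convection coefficient and :math:`D` the diffusion coefficient; these symbols are introduced here for illustration only):
.. math:: \phi(y) = \frac{1 - e^{-u_y y / D}}{1 - e^{-u_y L / D}}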
>>> axis = 1
>>> y = mesh.cellCenters[axis]
>>> CC = 1. - numerix.exp(-convCoeff[axis] * y / diffCoeff)
>>> DD = 1. - numerix.exp(-convCoeff[axis] * L / diffCoeff)
>>> analyticalArray = CC / DD
>>> print(var.allclose(analyticalArray, rtol = 1e-6, atol = 1e-6))
1
>>> if __name__ == '__main__':
... viewer = Viewer(vars = var)
... viewer.plot()
"""
from __future__ import unicode_literals
__docformat__ = 'restructuredtext'
from fipy import input
if __name__ == '__main__':
import fipy.tests.doctestPlus
exec(fipy.tests.doctestPlus._getScript())
input('finished')

blob_id: 41b7a902faf87390ad221c9b3f5c955ede78ea4a
directory_id: fea765125139b71175f69525aca692ba349bcf48
path: /monitoring/error_reporting.py
content_id: 5a21970a8dfc2d37232236b74989d652792567d5
detected_licenses: ["MIT"]
license_type: permissive
repo_name: ourresearch/oadoi
snapshot_id: 4e39b93429d30cedcdc9d6d5f618de6ebab0f105
revision_id: 54750e4d58fc39c3714d934b47060949320a89b3
branch_name: refs/heads/master
visit_date: 2023-09-05T18:01:53.743037
revision_date: 2023-08-28T14:33:33
committer_date: 2023-08-28T14:33:33
github_id: 59,266,757
star_events_count: 156
fork_events_count: 19
gha_license_id: MIT
gha_event_created_at: 2023-05-01T21:36:52
gha_created_at: 2016-05-20T05:14:06
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 320
extension: py
filename: error_reporting.py
content:
import json
from app import logger
from monitoring.email import send_email
def handle_papertrail_alert(alert):
payload = json.loads(alert.values['payload'])
pp_alert = json.dumps(payload, indent=2)
logger.info(u'got this papertrail alert:\n{}'.format(pp_alert))
send_email(pp_alert)
return alert

blob_id: cbb5ff0c99f5f23cab5cad7f31d92f58d95e9315
directory_id: f305f84ea6f721c2391300f0a60e21d2ce14f2a5
path: /11_动态规划/dp分类/区间dp/dfs/回文/中心扩展法求回文子串/2472. 不重叠回文子字符串的最大数目/2472. 不重叠回文子字符串的最大数目.py
content_id: 93fea8f8e8b2120c1e6365fa78ed0ed4ea7133aa
detected_licenses: []
license_type: no_license
repo_name: 981377660LMT/algorithm-study
snapshot_id: f2ada3e6959338ae1bc21934a84f7314a8ecff82
revision_id: 7e79e26bb8f641868561b186e34c1127ed63c9e0
branch_name: refs/heads/master
visit_date: 2023-09-01T18:26:16.525579
revision_date: 2023-09-01T12:21:58
committer_date: 2023-09-01T12:21:58
github_id: 385,861,235
star_events_count: 225
fork_events_count: 24
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,151
extension: py
filename: 2472. 不重叠回文子字符串的最大数目.py
content:
# You are given a string s and a positive integer k.
# Choose a set of non-overlapping substrings of s such that:
# !each substring has length at least k, and
# !each substring is a palindrome.
# !Return the maximum number of substrings in an optimal selection.
class Solution:
def maxPalindromes2(self, s: str, k: int) -> int:
"""
贪心+马拉车 O(n)
我们只需要考虑长度为k和k+1的回文串数目就行。
如果k+2i是回文串,那么掐头去尾,肯定有长度为k的回文串,
要数目最多,我们就选最短的。
!只需要判断 [i,i+k-1] 和 [i,i+k]是否为回文串即可,
!使用 manacher 算法可以在 O(n) 时间内判断一个子串是否为回文串
"""
# !js-algorithm\17_模式匹配\马拉车拉马\2472. 不重叠回文子字符串的最大数目.py
...
def maxPalindromes1(self, s: str, k: int) -> int:
"""O(n^2)dp"""
def expand(left: int, right: int) -> None:
"""中心扩展法求s[left:right+1]是否为回文串"""
while left >= 0 and right < len(s) and s[left] == s[right]:
if right - left + 1 >= k:
isPalindrome[left][right] = True
left -= 1
right += 1
n = len(s)
        isPalindrome = [[False] * n for _ in range(n)]  # isPalindrome[i][j]: whether s[i:j+1] is a palindrome
for i in range(n):
expand(i, i)
expand(i, i + 1)
        # select the maximum number of pairwise non-overlapping intervals (dp)
        dp = [0] * (n + 1)  # dp[i]: max number of non-overlapping palindromic substrings in s[:i]
for i in range(1, n + 1):
dp[i] = dp[i - 1] # jump
for j in range(i - k + 1): # not jump
if isPalindrome[j][i - 1]:
dp[i] = max(dp[i], dp[j] + 1)
return dp[-1]
print(Solution().maxPalindromes1(s="abaccdbbd", k=3))
print(Solution().maxPalindromes1(s="iqqibcecvrbxxj", k=1))
print(Solution().maxPalindromes1(s="i" * 2000, k=1))
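# A minimal greedy sketch of the length-k / length-(k+1) idea described in maxPalindromes2
# above. Assumptions: a naive two-pointer window check replaces Manacher, so this runs in
# O(n*k) rather than O(n); the function and helper names are introduced here for illustration.
def maxPalindromesGreedy(s: str, k: int) -> int:
    def isWindowPalindrome(left: int, right: int) -> bool:
        """Check s[left:right+1] with two pointers."""
        while left < right:
            if s[left] != s[right]:
                return False
            left += 1
            right -= 1
        return True

    res, i, n = 0, 0, len(s)
    while i + k <= n:
        if isWindowPalindrome(i, i + k - 1):  # the length-k window ends earliest, so take it first
            res += 1
            i += k
        elif i + k < n and isWindowPalindrome(i, i + k):  # otherwise try the length-(k+1) window
            res += 1
            i += k + 1
        else:
            i += 1
    return res
print(maxPalindromesGreedy("abaccdbbd", 3))  # expected 2, matching maxPalindromes1 above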

blob_id: 8772345250882b91030a89cd14b0d40e5f48aeb0
directory_id: dacdebab897f9287f37a2e85c5705a926ddd36aa
path: /tests/common.py
content_id: 7f5f4666f6e748a13ba0adbdbe9efd826cc5d424
detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: snakemake/snakemake
snapshot_id: 5d4528193d87786d7b372ca7653ece302ff46965
revision_id: 27b224ed12448df8aebc7d1ff8f25e3bf7622232
branch_name: refs/heads/main
visit_date: 2023-09-02T08:37:04.323976
revision_date: 2023-08-11T10:02:34
committer_date: 2023-08-11T10:02:34
github_id: 212,840,200
star_events_count: 1,941
fork_events_count: 536
gha_license_id: MIT
gha_event_created_at: 2023-09-11T09:51:44
gha_created_at: 2019-10-04T14:58:11
gha_language: HTML
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 9,867
extension: py
filename: common.py
content:
__authors__ = ["Tobias Marschall", "Marcel Martin", "Johannes Köster"]
__copyright__ = "Copyright 2022, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import os
import signal
import sys
import shlex
import shutil
import time
from os.path import join
import tempfile
import hashlib
import urllib
import pytest
import glob
import subprocess
import tarfile
from snakemake.api import snakemake
from snakemake.shell import shell
from snakemake.common import ON_WINDOWS
from snakemake.resources import DefaultResources, GroupResources, ResourceScopes
def dpath(path):
"""get the path to a data file (relative to the directory this
test lives in)"""
return os.path.realpath(join(os.path.dirname(__file__), path))
def md5sum(filename, ignore_newlines=False):
if ignore_newlines:
with open(filename, "r", encoding="utf-8", errors="surrogateescape") as f:
data = f.read().strip().encode("utf8", errors="surrogateescape")
else:
data = open(filename, "rb").read().strip()
return hashlib.md5(data).hexdigest()
# test skipping
def is_connected():
try:
urllib.request.urlopen("http://www.google.com", timeout=1)
return True
except urllib.request.URLError:
return False
def is_ci():
return "CI" in os.environ
def has_gcloud_service_key():
return "GCP_AVAILABLE" in os.environ
def has_azbatch_account_url():
return os.environ.get("AZ_BATCH_ACCOUNT_URL")
def has_zenodo_token():
return os.environ.get("ZENODO_SANDBOX_PAT")
gcloud = pytest.mark.skipif(
not is_connected() or not has_gcloud_service_key(),
reason="Skipping GCLOUD tests because not on "
"CI, no inet connection or not logged "
"in to gcloud.",
)
azbatch = pytest.mark.skipif(
not is_connected() or not has_azbatch_account_url(),
reason="Skipping AZBATCH tests because "
"no inet connection or no AZ_BATCH_ACCOUNT_URL.",
)
connected = pytest.mark.skipif(not is_connected(), reason="no internet connection")
ci = pytest.mark.skipif(not is_ci(), reason="not in CI")
not_ci = pytest.mark.skipif(is_ci(), reason="skipped in CI")
zenodo = pytest.mark.skipif(
not has_zenodo_token(), reason="no ZENODO_SANDBOX_PAT provided"
)
def copy(src, dst):
if os.path.isdir(src):
shutil.copytree(src, os.path.join(dst, os.path.basename(src)))
else:
shutil.copy(src, dst)
def get_expected_files(results_dir):
"""Recursively walk through the expected-results directory to enumerate
all expected files."""
return [
os.path.relpath(f, results_dir)
for f in glob.iglob(os.path.join(results_dir, "**/**"), recursive=True)
if not os.path.isdir(f)
]
def untar_folder(tar_file, output_path):
if not os.path.isdir(output_path):
with tarfile.open(tar_file) as tar:
tar.extractall(path=output_path)
def print_tree(path, exclude=None):
for root, _dirs, files in os.walk(path):
if exclude and root.startswith(os.path.join(path, exclude)):
continue
level = root.replace(path, "").count(os.sep)
indent = " " * 4 * level
print(f"{indent}{os.path.basename(root)}/")
subindent = " " * 4 * (level + 1)
for f in files:
print(f"{subindent}{f}")
def run(
path,
shouldfail=False,
snakefile="Snakefile",
subpath=None,
no_tmpdir=False,
check_md5=True,
check_results=None,
cores=3,
nodes=None,
set_pythonpath=True,
cleanup=True,
conda_frontend="mamba",
config=dict(),
targets=None,
container_image=os.environ.get("CONTAINER_IMAGE", "snakemake/snakemake:latest"),
shellcmd=None,
sigint_after=None,
overwrite_resource_scopes=None,
**params,
):
"""
Test the Snakefile in the path.
There must be a Snakefile in the path and a subdirectory named
expected-results. If cleanup is False, we return the temporary
directory to the calling test for inspection, and the test should
clean it up.
"""
if check_results is None:
if not shouldfail:
check_results = True
else:
check_results = False
if set_pythonpath:
# Enforce current workdir (the snakemake source dir) to also be in PYTHONPATH
# when subprocesses are invoked in the tempdir defined below.
os.environ["PYTHONPATH"] = os.getcwd()
elif "PYTHONPATH" in os.environ:
del os.environ["PYTHONPATH"]
results_dir = join(path, "expected-results")
original_snakefile = join(path, snakefile)
assert os.path.exists(original_snakefile)
assert os.path.exists(results_dir) and os.path.isdir(
results_dir
), "{} does not exist".format(results_dir)
# If we need to further check results, we won't cleanup tmpdir
tmpdir = next(tempfile._get_candidate_names())
tmpdir = os.path.join(tempfile.gettempdir(), "snakemake-%s" % tmpdir)
os.mkdir(tmpdir)
config = dict(config)
# handle subworkflow
if subpath is not None:
# set up a working directory for the subworkflow and pass it in `config`
# for now, only one subworkflow is supported
assert os.path.exists(subpath) and os.path.isdir(
subpath
), "{} does not exist".format(subpath)
subworkdir = os.path.join(tmpdir, "subworkdir")
os.mkdir(subworkdir)
# copy files
for f in os.listdir(subpath):
copy(os.path.join(subpath, f), subworkdir)
config["subworkdir"] = subworkdir
# copy files
for f in os.listdir(path):
copy(os.path.join(path, f), tmpdir)
# Snakefile is now in temporary directory
snakefile = join(tmpdir, snakefile)
# run snakemake
if shellcmd:
if not shellcmd.startswith("snakemake"):
raise ValueError("shellcmd does not start with snakemake")
shellcmd = "{} -m {}".format(sys.executable, shellcmd)
try:
if sigint_after is None:
subprocess.run(
shellcmd,
cwd=path if no_tmpdir else tmpdir,
check=True,
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
success = True
else:
with subprocess.Popen(
shlex.split(shellcmd),
cwd=path if no_tmpdir else tmpdir,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
) as process:
time.sleep(sigint_after)
process.send_signal(signal.SIGINT)
time.sleep(2)
success = process.returncode == 0
except subprocess.CalledProcessError as e:
success = False
print(e.stdout.decode(), file=sys.stderr)
else:
        assert sigint_after is None, "Cannot send SIGINT when calling directly"
success = snakemake(
snakefile=original_snakefile if no_tmpdir else snakefile,
cores=cores,
nodes=nodes,
workdir=path if no_tmpdir else tmpdir,
stats="stats.txt",
config=config,
verbose=True,
targets=targets,
conda_frontend=conda_frontend,
container_image=container_image,
overwrite_resource_scopes=(
ResourceScopes(overwrite_resource_scopes)
if overwrite_resource_scopes is not None
else overwrite_resource_scopes
),
**params,
)
if shouldfail:
assert not success, "expected error on execution"
else:
if not success:
print("Workdir:")
print_tree(tmpdir, exclude=".snakemake/conda")
assert success, "expected successful execution"
if check_results:
for resultfile in get_expected_files(results_dir):
if resultfile in [".gitignore", ".gitkeep"] or not os.path.isfile(
os.path.join(results_dir, resultfile)
):
# this means tests cannot use directories as output files
continue
targetfile = join(tmpdir, resultfile)
expectedfile = join(results_dir, resultfile)
if ON_WINDOWS:
if os.path.exists(join(results_dir, resultfile + "_WIN")):
continue # Skip test if a Windows specific file exists
if resultfile.endswith("_WIN"):
targetfile = join(tmpdir, resultfile[:-4])
elif resultfile.endswith("_WIN"):
# Skip win specific result files on Posix platforms
continue
assert os.path.exists(targetfile), 'expected file "{}" not produced'.format(
resultfile
)
if check_md5:
md5expected = md5sum(expectedfile, ignore_newlines=ON_WINDOWS)
md5target = md5sum(targetfile, ignore_newlines=ON_WINDOWS)
if md5target != md5expected:
with open(expectedfile) as expected:
expected_content = expected.read().strip()
with open(targetfile) as target:
content = target.read().strip()
assert (
False
), "wrong result produced for file '{resultfile}':\n------found------\n{content}\n-----expected-----\n{expected_content}\n-----------------".format(
resultfile=resultfile,
content=content,
expected_content=expected_content,
)
if not cleanup:
return tmpdir
shutil.rmtree(tmpdir, ignore_errors=ON_WINDOWS)
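# A minimal sketch of how a test module typically drives the harness above (assumption:
# a directory tests/test_example containing a Snakefile and an expected-results/ folder;
# the names below are placeholders, not shipped test cases).
def _example_usage():
    # copy the test directory to a tempdir, run Snakemake there with the given options,
    # and compare every file under expected-results/ against the produced output
    run(dpath("test_example"), cores=1)
    # a case that is expected to fail asserts on failure instead
    run(dpath("test_example_failing"), shouldfail=True)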

blob_id: 405d9d857b75e620a116bde5200ac70ff3231b21
directory_id: 03666e5f961946fc1a0ac67781ac1425562ef0d7
path: /src/test/tests/hybrid/qcrit.py
content_id: f02131eba31cd7e861b17b8ca2492425721ac6b2
detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: visit-dav/visit
snapshot_id: e9f81b4d4b9b9930a0db9d5282cd1bcabf465e2e
revision_id: 601ae46e0bef2e18425b482a755d03490ade0493
branch_name: refs/heads/develop
visit_date: 2023-09-06T08:19:38.397058
revision_date: 2023-09-05T21:29:32
committer_date: 2023-09-05T21:29:32
github_id: 165,565,988
star_events_count: 335
fork_events_count: 120
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T00:53:37
gha_created_at: 2019-01-13T23:27:26
gha_language: C
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,162
extension: py
filename: qcrit.py
content:
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: qcrit.py
#
# Tests:      plots - pseudocolor, contour
# Defect ID: 1829
#
# Programmer: Kevin Griffin
# Date: Thu Jul 31 14:21:02 PDT 2014
#
# Modifications:
#
# ----------------------------------------------------------------------------
ds = data_path("miranda_test_data/TG_vortex/plot.raw")
#ds = "/g/g14/kgriffin/trunk/build_debug/data/miranda_test_data/TG_vortex/plot.raw"
OpenDatabase(ds)
DefineScalarExpression("qcrit", "q_criterion(gradient(velocity[0]), gradient(velocity[1]), gradient(velocity[2]))")
TimeSliderNextState()
TimeSliderNextState()
#
# Test 1
#
AddPlot("Pseudocolor", "qcrit")
AddOperator("Isovolume")
IsovolumeAtts = IsovolumeAttributes()
IsovolumeAtts.lbound = 0.2
IsovolumeAtts.ubound = 5.0
IsovolumeAtts.variable = "default"
SetOperatorOptions(IsovolumeAtts)
DrawPlots()
Test("iso_qcrit")
DeleteAllPlots()
#
# Test 2
#
AddPlot("Contour", "qcrit")
DrawPlots()
Test("contour_qcrit")
DeleteAllPlots()
#
# Test 3
#
AddPlot("Pseudocolor", "qcrit")
DrawPlots()
Test("pseudo_qcrit")
DeleteAllPlots()
Exit()

blob_id: 83892f6c286e163c1823af89defe3ef20649385a
directory_id: 7fa7a9e87ecbaefc1414219701fbf8c789f639ac
path: /gnpy/tools/cli_examples.py
content_id: ba0e226658652161ee5a3ace982a174c5c7503c5
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: Telecominfraproject/oopt-gnpy
snapshot_id: 4f804ba3fc1be5712b78d89d8c029a4546813334
revision_id: 70731b64d68ea199d369feb7e3178d3e72d0b48b
branch_name: refs/heads/master
visit_date: 2023-08-08T03:15:56.015462
revision_date: 2023-06-26T11:47:29
committer_date: 2023-07-24T15:13:15
github_id: 96,894,149
star_events_count: 156
fork_events_count: 88
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-03-03T15:35:56
gha_created_at: 2017-07-11T13:08:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 22,055
extension: py
filename: cli_examples.py
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
gnpy.tools.cli_examples
=======================
Common code for CLI examples
"""
import argparse
import logging
import sys
from math import ceil
from numpy import linspace, mean
from pathlib import Path
import gnpy.core.ansi_escapes as ansi_escapes
from gnpy.core.elements import Transceiver, Fiber, RamanFiber
from gnpy.core.equipment import trx_mode_params
import gnpy.core.exceptions as exceptions
from gnpy.core.network import build_network
from gnpy.core.parameters import SimParams
from gnpy.core.utils import db2lin, lin2db, automatic_nch
from gnpy.topology.request import (ResultElement, jsontocsv, compute_path_dsjctn, requests_aggregation,
BLOCKING_NOPATH, correct_json_route_list,
deduplicate_disjunctions, compute_path_with_disjunction,
PathRequest, compute_constrained_path, propagate)
from gnpy.topology.spectrum_assignment import build_oms_list, pth_assign_spectrum
from gnpy.tools.json_io import (load_equipment, load_network, load_json, load_requests, save_network,
requests_from_json, disjunctions_from_json, save_json, load_initial_spectrum)
from gnpy.tools.plots import plot_baseline, plot_results
_logger = logging.getLogger(__name__)
_examples_dir = Path(__file__).parent.parent / 'example-data'
_help_footer = '''
This program is part of GNPy, https://github.com/TelecomInfraProject/oopt-gnpy
Learn more at https://gnpy.readthedocs.io/
'''
_help_fname_json = 'FILE.json'
_help_fname_json_csv = 'FILE.(json|csv)'
def show_example_data_dir():
print(f'{_examples_dir}/')
def load_common_data(equipment_filename, topology_filename, simulation_filename, save_raw_network_filename):
"""Load common configuration from JSON files"""
try:
equipment = load_equipment(equipment_filename)
network = load_network(topology_filename, equipment)
if save_raw_network_filename is not None:
save_network(network, save_raw_network_filename)
print(f'{ansi_escapes.blue}Raw network (no optimizations) saved to {save_raw_network_filename}{ansi_escapes.reset}')
if not simulation_filename:
sim_params = {}
if next((node for node in network if isinstance(node, RamanFiber)), None) is not None:
print(f'{ansi_escapes.red}Invocation error:{ansi_escapes.reset} '
f'RamanFiber requires passing simulation params via --sim-params')
sys.exit(1)
else:
sim_params = load_json(simulation_filename)
SimParams.set_params(sim_params)
except exceptions.EquipmentConfigError as e:
print(f'{ansi_escapes.red}Configuration error in the equipment library:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.NetworkTopologyError as e:
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ParametersError as e:
print(f'{ansi_escapes.red}Simulation parameters error:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ConfigurationError as e:
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ServiceError as e:
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {e}')
sys.exit(1)
return (equipment, network)
def _setup_logging(args):
logging.basicConfig(level={2: logging.DEBUG, 1: logging.INFO, 0: logging.CRITICAL}.get(args.verbose, logging.DEBUG))
def _add_common_options(parser: argparse.ArgumentParser, network_default: Path):
parser.add_argument('topology', nargs='?', type=Path, metavar='NETWORK-TOPOLOGY.(json|xls|xlsx)',
default=network_default,
help='Input network topology')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Increase verbosity (can be specified several times)')
parser.add_argument('-e', '--equipment', type=Path, metavar=_help_fname_json,
default=_examples_dir / 'eqpt_config.json', help='Equipment library')
parser.add_argument('--sim-params', type=Path, metavar=_help_fname_json,
default=None, help='Path to the JSON containing simulation parameters (required for Raman). '
f'Example: {_examples_dir / "sim_params.json"}')
parser.add_argument('--save-network', type=Path, metavar=_help_fname_json,
help='Save the final network as a JSON file')
parser.add_argument('--save-network-before-autodesign', type=Path, metavar=_help_fname_json,
help='Dump the network into a JSON file prior to autodesign')
parser.add_argument('--no-insert-edfas', action='store_true',
help='Disable insertion of EDFAs after ROADMs and fibers '
'as well as splitting of fibers by auto-design.')
def transmission_main_example(args=None):
parser = argparse.ArgumentParser(
description='Send a full spectrum load through the network from point A to point B',
epilog=_help_footer,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
_add_common_options(parser, network_default=_examples_dir / 'edfa_example_network.json')
parser.add_argument('--show-channels', action='store_true', help='Show final per-channel OSNR and GSNR summary')
parser.add_argument('-pl', '--plot', action='store_true')
parser.add_argument('-l', '--list-nodes', action='store_true', help='list all transceiver nodes')
parser.add_argument('-po', '--power', default=0, help='channel ref power in dBm')
parser.add_argument('--spectrum', type=Path, help='user defined mixed rate spectrum JSON file')
parser.add_argument('source', nargs='?', help='source node')
parser.add_argument('destination', nargs='?', help='destination node')
args = parser.parse_args(args if args is not None else sys.argv[1:])
_setup_logging(args)
(equipment, network) = load_common_data(args.equipment, args.topology, args.sim_params, args.save_network_before_autodesign)
if args.plot:
plot_baseline(network)
transceivers = {n.uid: n for n in network.nodes() if isinstance(n, Transceiver)}
if not transceivers:
sys.exit('Network has no transceivers!')
if len(transceivers) < 2:
sys.exit('Network has only one transceiver!')
if args.list_nodes:
for uid in transceivers:
print(uid)
sys.exit()
# First try to find exact match if source/destination provided
if args.source:
source = transceivers.pop(args.source, None)
valid_source = True if source else False
else:
source = None
_logger.info('No source node specified: picking random transceiver')
if args.destination:
destination = transceivers.pop(args.destination, None)
valid_destination = True if destination else False
else:
destination = None
_logger.info('No destination node specified: picking random transceiver')
# If no exact match try to find partial match
if args.source and not source:
# TODO code a more advanced regex to find nodes match
source = next((transceivers.pop(uid) for uid in transceivers
if args.source.lower() in uid.lower()), None)
if args.destination and not destination:
# TODO code a more advanced regex to find nodes match
destination = next((transceivers.pop(uid) for uid in transceivers
if args.destination.lower() in uid.lower()), None)
# If no partial match or no source/destination provided pick random
if not source:
source = list(transceivers.values())[0]
del transceivers[source.uid]
if not destination:
destination = list(transceivers.values())[0]
_logger.info(f'source = {args.source!r}')
_logger.info(f'destination = {args.destination!r}')
params = {}
params['request_id'] = 0
params['trx_type'] = ''
params['trx_mode'] = ''
params['source'] = source.uid
params['destination'] = destination.uid
params['bidir'] = False
params['nodes_list'] = [destination.uid]
params['loose_list'] = ['strict']
params['format'] = ''
params['path_bandwidth'] = 0
params['effective_freq_slot'] = None
trx_params = trx_mode_params(equipment)
if args.power:
trx_params['power'] = db2lin(float(args.power)) * 1e-3
params.update(trx_params)
initial_spectrum = None
nb_channels = automatic_nch(trx_params['f_min'], trx_params['f_max'], trx_params['spacing'])
if args.spectrum:
initial_spectrum = load_initial_spectrum(args.spectrum)
nb_channels = len(initial_spectrum)
print('User input for spectrum used for propagation instead of SI')
params['nb_channel'] = nb_channels
req = PathRequest(**params)
req.initial_spectrum = initial_spectrum
print(f'There are {nb_channels} channels propagating')
power_mode = equipment['Span']['default'].power_mode
print('\n'.join([f'Power mode is set to {power_mode}',
f'=> it can be modified in eqpt_config.json - Span']))
# Keep the reference channel for design: the one from SI, with full load same channels
pref_ch_db = lin2db(req.power * 1e3) # reference channel power / span (SL=20dB)
pref_total_db = pref_ch_db + lin2db(req.nb_channel) # reference total power / span (SL=20dB)
try:
build_network(network, equipment, pref_ch_db, pref_total_db, args.no_insert_edfas)
except exceptions.NetworkTopologyError as e:
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ConfigurationError as e:
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {e}')
sys.exit(1)
path = compute_constrained_path(network, req)
spans = [s.params.length for s in path if isinstance(s, RamanFiber) or isinstance(s, Fiber)]
print(f'\nThere are {len(spans)} fiber spans over {sum(spans)/1000:.0f} km between {source.uid} '
f'and {destination.uid}')
print(f'\nNow propagating between {source.uid} and {destination.uid}:')
power_range = [0]
if power_mode:
# power cannot be changed in gain mode
try:
p_start, p_stop, p_step = equipment['SI']['default'].power_range_db
p_num = abs(int(round((p_stop - p_start) / p_step))) + 1 if p_step != 0 else 1
power_range = list(linspace(p_start, p_stop, p_num))
except TypeError:
print('invalid power range definition in eqpt_config, should be power_range_db: [lower, upper, step]')
for dp_db in power_range:
req.power = db2lin(pref_ch_db + dp_db) * 1e-3
        # if the initial spectrum did not contain any power, we now need to use this one.
        # note that the initial power defines a differential wrt req.power, so that if req.power is set to 2 mW (3 dBm)
        # and the initial spectrum was set to 0, this sets an initial per-channel delta power of -3 dB, so that
        # whatever the equalization, -3 dB is applied on all channels (i.e. the initial power in the initial
        # spectrum pre-empts the "--power" option)
if power_mode:
print(f'\nPropagating with input power = {ansi_escapes.cyan}{lin2db(req.power*1e3):.2f} dBm{ansi_escapes.reset}:')
else:
print(f'\nPropagating in {ansi_escapes.cyan}gain mode{ansi_escapes.reset}: power cannot be set manually')
infos = propagate(path, req, equipment)
if len(power_range) == 1:
for elem in path:
print(elem)
if power_mode:
print(f'\nTransmission result for input power = {lin2db(req.power*1e3):.2f} dBm:')
else:
print(f'\nTransmission results:')
print(f' Final GSNR (0.1 nm): {ansi_escapes.cyan}{mean(destination.snr_01nm):.02f} dB{ansi_escapes.reset}')
else:
print(path[-1])
if args.save_network is not None:
save_network(network, args.save_network)
print(f'{ansi_escapes.blue}Network (after autodesign) saved to {args.save_network}{ansi_escapes.reset}')
if args.show_channels:
print('\nThe GSNR per channel at the end of the line is:')
print(
'{:>5}{:>26}{:>26}{:>28}{:>28}{:>28}' .format(
'Ch. #',
'Channel frequency (THz)',
'Channel power (dBm)',
'OSNR ASE (signal bw, dB)',
'SNR NLI (signal bw, dB)',
'GSNR (signal bw, dB)'))
for final_carrier, ch_osnr, ch_snr_nl, ch_snr in zip(
infos.carriers, path[-1].osnr_ase, path[-1].osnr_nli, path[-1].snr):
ch_freq = final_carrier.frequency * 1e-12
ch_power = lin2db(final_carrier.power.signal * 1e3)
print(
'{:5}{:26.5f}{:26.2f}{:28.2f}{:28.2f}{:28.2f}' .format(
final_carrier.channel_number, round(
ch_freq, 5), round(
ch_power, 2), round(
ch_osnr, 2), round(
ch_snr_nl, 2), round(
ch_snr, 2)))
if not args.source:
print(f'\n(No source node specified: picked {source.uid})')
elif not valid_source:
print(f'\n(Invalid source node {args.source!r} replaced with {source.uid})')
if not args.destination:
print(f'\n(No destination node specified: picked {destination.uid})')
elif not valid_destination:
print(f'\n(Invalid destination node {args.destination!r} replaced with {destination.uid})')
if args.plot:
plot_results(network, path, source, destination)
def _path_result_json(pathresult):
return {'response': [n.json for n in pathresult]}
def path_requests_run(args=None):
parser = argparse.ArgumentParser(
description='Compute performance for a list of services provided in a json file or an excel sheet',
epilog=_help_footer,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
_add_common_options(parser, network_default=_examples_dir / 'meshTopologyExampleV2.xls')
parser.add_argument('service_filename', nargs='?', type=Path, metavar='SERVICES-REQUESTS.(json|xls|xlsx)',
default=_examples_dir / 'meshTopologyExampleV2.xls',
help='Input service file')
parser.add_argument('-bi', '--bidir', action='store_true',
help='considers that all demands are bidir')
parser.add_argument('-o', '--output', type=Path, metavar=_help_fname_json_csv,
                        help='Store satisfied requests into a JSON or CSV file')
args = parser.parse_args(args if args is not None else sys.argv[1:])
_setup_logging(args)
_logger.info(f'Computing path requests {args.service_filename} into JSON format')
(equipment, network) = load_common_data(args.equipment, args.topology, args.sim_params, args.save_network_before_autodesign)
# Build the network once using the default power defined in SI in eqpt config
# TODO power density: db2linp(ower_dbm": 0)/power_dbm": 0 * nb channels as defined by
# spacing, f_min and f_max
p_db = equipment['SI']['default'].power_dbm
p_total_db = p_db + lin2db(automatic_nch(equipment['SI']['default'].f_min,
equipment['SI']['default'].f_max, equipment['SI']['default'].spacing))
try:
build_network(network, equipment, p_db, p_total_db, args.no_insert_edfas)
except exceptions.NetworkTopologyError as e:
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ConfigurationError as e:
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {e}')
sys.exit(1)
if args.save_network is not None:
save_network(network, args.save_network)
print(f'{ansi_escapes.blue}Network (after autodesign) saved to {args.save_network}{ansi_escapes.reset}')
oms_list = build_oms_list(network, equipment)
try:
data = load_requests(args.service_filename, equipment, bidir=args.bidir,
network=network, network_filename=args.topology)
rqs = requests_from_json(data, equipment)
except exceptions.ServiceError as e:
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {e}')
sys.exit(1)
    # check that request ids are unique. Non-unique ids may
    # mess up the computation: better to stop the computation
all_ids = [r.request_id for r in rqs]
if len(all_ids) != len(set(all_ids)):
for item in list(set(all_ids)):
all_ids.remove(item)
msg = f'Requests id {all_ids} are not unique'
_logger.critical(msg)
sys.exit()
rqs = correct_json_route_list(network, rqs)
# pths = compute_path(network, equipment, rqs)
dsjn = disjunctions_from_json(data)
print(f'{ansi_escapes.blue}List of disjunctions{ansi_escapes.reset}')
print(dsjn)
# need to warn or correct in case of wrong disjunction form
# disjunction must not be repeated with same or different ids
dsjn = deduplicate_disjunctions(dsjn)
# Aggregate demands with same exact constraints
print(f'{ansi_escapes.blue}Aggregating similar requests{ansi_escapes.reset}')
rqs, dsjn = requests_aggregation(rqs, dsjn)
# TODO export novel set of aggregated demands in a json file
print(f'{ansi_escapes.blue}The following services have been requested:{ansi_escapes.reset}')
print(rqs)
print(f'{ansi_escapes.blue}Computing all paths with constraints{ansi_escapes.reset}')
try:
pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
except exceptions.DisjunctionError as this_e:
print(f'{ansi_escapes.red}Disjunction error:{ansi_escapes.reset} {this_e}')
sys.exit(1)
print(f'{ansi_escapes.blue}Propagating on selected path{ansi_escapes.reset}')
propagatedpths, reversed_pths, reversed_propagatedpths = compute_path_with_disjunction(network, equipment, rqs, pths)
    # Note that the deepcopy used in compute_path_with_disjunction returns
    # a list of nodes which do not belong to the network (they are copies of the node objects),
    # so there can be no propagation on these nodes.
pth_assign_spectrum(pths, rqs, oms_list, reversed_pths)
print(f'{ansi_escapes.blue}Result summary{ansi_escapes.reset}')
header = ['req id', ' demand', ' GSNR@bandwidth A-Z (Z-A)', ' GSNR@0.1nm A-Z (Z-A)',
' Receiver minOSNR', ' mode', ' Gbit/s', ' nb of tsp pairs',
'N,M or blocking reason']
data = []
data.append(header)
for i, this_p in enumerate(propagatedpths):
rev_pth = reversed_propagatedpths[i]
if rev_pth and this_p:
psnrb = f'{round(mean(this_p[-1].snr),2)} ({round(mean(rev_pth[-1].snr),2)})'
psnr = f'{round(mean(this_p[-1].snr_01nm), 2)}' +\
f' ({round(mean(rev_pth[-1].snr_01nm),2)})'
elif this_p:
psnrb = f'{round(mean(this_p[-1].snr),2)}'
psnr = f'{round(mean(this_p[-1].snr_01nm),2)}'
try:
if rqs[i].blocking_reason in BLOCKING_NOPATH:
line = [f'{rqs[i].request_id}', f' {rqs[i].source} to {rqs[i].destination} :',
f'-', f'-', f'-', f'{rqs[i].tsp_mode}', f'{round(rqs[i].path_bandwidth * 1e-9,2)}',
f'-', f'{rqs[i].blocking_reason}']
else:
line = [f'{rqs[i].request_id}', f' {rqs[i].source} to {rqs[i].destination} : ', psnrb,
psnr, f'-', f'{rqs[i].tsp_mode}', f'{round(rqs[i].path_bandwidth * 1e-9, 2)}',
f'-', f'{rqs[i].blocking_reason}']
except AttributeError:
line = [f'{rqs[i].request_id}', f' {rqs[i].source} to {rqs[i].destination} : ', psnrb,
psnr, f'{rqs[i].OSNR + equipment["SI"]["default"].sys_margins}',
f'{rqs[i].tsp_mode}', f'{round(rqs[i].path_bandwidth * 1e-9,2)}',
f'{ceil(rqs[i].path_bandwidth / rqs[i].bit_rate) }', f'({rqs[i].N},{rqs[i].M})']
data.append(line)
col_width = max(len(word) for row in data for word in row[2:]) # padding
firstcol_width = max(len(row[0]) for row in data) # padding
secondcol_width = max(len(row[1]) for row in data) # padding
for row in data:
firstcol = ''.join(row[0].ljust(firstcol_width))
secondcol = ''.join(row[1].ljust(secondcol_width))
remainingcols = ''.join(word.center(col_width, ' ') for word in row[2:])
print(f'{firstcol} {secondcol} {remainingcols}')
print(f'{ansi_escapes.yellow}Result summary shows mean GSNR and OSNR (average over all channels){ansi_escapes.reset}')
if args.output:
result = []
        # assumes that the list of rqs and the list of propagatedpths have the same order
for i, pth in enumerate(propagatedpths):
result.append(ResultElement(rqs[i], pth, reversed_propagatedpths[i]))
temp = _path_result_json(result)
if args.output.suffix.lower() == '.json':
save_json(temp, args.output)
print(f'{ansi_escapes.blue}Saved JSON to {args.output}{ansi_escapes.reset}')
elif args.output.suffix.lower() == '.csv':
with open(args.output, "w", encoding='utf-8') as fcsv:
jsontocsv(temp, equipment, fcsv)
print(f'{ansi_escapes.blue}Saved CSV to {args.output}{ansi_escapes.reset}')
else:
print(f'{ansi_escapes.red}Cannot save output: neither JSON nor CSV file{ansi_escapes.reset}')
sys.exit(1)
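# A minimal sketch of driving the two entry points above programmatically (the topology,
# equipment, and service file names and the node names are placeholders, not files shipped
# with this module):
def _example_invocations():
    # full-spectrum propagation from one transceiver to another, printing per-channel GSNR
    transmission_main_example(['my_network.json', '-e', 'my_eqpt.json',
                               'SiteA', 'SiteZ', '--show-channels'])
    # compute a list of service requests and store the result as JSON
    path_requests_run(['my_network.json', 'my_services.json', '-o', 'my_results.json'])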

blob_id: 6b82901e824c7b79b681a6834733fe51fa38550a
directory_id: 7f4605f5424a4a62794c701cb9b26946cf3a66e1
path: /examples/contrib/quantization_aware_training/parser.py
content_id: 2c445c48fa5280697c6c08a759aef66d78ecc23f
detected_licenses: ["MIT"]
license_type: permissive
repo_name: NVIDIA-AI-IOT/torch2trt
snapshot_id: 8bad3d178767e70a009e0055e89e37e72e0b1027
revision_id: 36656b614f3fbc067ac673932e2200d7afdae712
branch_name: refs/heads/master
visit_date: 2023-07-31T22:52:09.467109
revision_date: 2023-04-28T17:22:32
committer_date: 2023-04-28T17:22:32
github_id: 183,790,380
star_events_count: 4,295
fork_events_count: 719
gha_license_id: MIT
gha_event_created_at: 2023-04-28T17:22:33
gha_created_at: 2019-04-27T15:30:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,300
extension: py
filename: parser.py
content:
import argparse
def parse_args():
"""
"""
parser = argparse.ArgumentParser(description='PyTorch QAT')
parser.add_argument('--tl','--transfer_learning',action='store_true',help='used to map weights correctly')
parser.add_argument('--iter',default=300, type=int, help='no of iterations')
parser.add_argument('--m','--model_name',default=None,help="Name of the model")
    parser.add_argument('--b', '--batch_size', default=128, type=int, help='mini-batch size (default: 128)')
parser.add_argument('--optimizer', default='Adam', type=str,help='type of optimizer (default=Adam)')
parser.add_argument( '--wd','--weight-decay', default=1e-5, type=float, help='weight decay (default: 1e-5)')
parser.add_argument('--start_epoch','--s_ep', default=0, type=int, help='starting epoch')
parser.add_argument('--num_epochs',default=30,type=int, help='no of epochs')
parser.add_argument('--no_cuda', action='store_true',help='disables cuda training')
parser.add_argument('--seed', type=int, default=12345,help='random seed for experiments. [default: 12345]')
parser.add_argument('--lr', '--learning_rate', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--lrdt', '--learning_rate_decay_interval', default=30, type=int, help='initial learning rate decay after n epochs')
parser.add_argument('--od','--output_dir', default='/tmp/',help='output path')
parser.add_argument('--en','--exp_name', default='pytorch_exp',help = 'experiment name to create output dir')
parser.add_argument('--load_ckpt', default = None, help = "path to ckpt")
parser.add_argument('--netqat',action='store_true',help = 'quantize model using custom layer')
parser.add_argument('--partial_ckpt',action='store_true',help = 'load_partial checkpoint')
parser.add_argument('--v','--verbose',action='store_true')
parser.add_argument('--FP16',action='store_true',help='run TRT engine at FP16')
parser.add_argument('--test_trt',action='store_true',help='gather metrics using trt')
parser.add_argument('--INT8PTC',action='store_true',help='run TRT engine at INT8 with Post Training Cal')
parser.add_argument('--INT8QAT',action='store_true',help='run TRT engine at INT8 with QAT')
args = parser.parse_args()
return args

blob_id: 4234b660446fc26f7ccffd4a9fc2633ae96b83fb
directory_id: 9734c93c86c982b1ce046340bac9e53645b261b8
path: /tests/output/l2t_csv.py
content_id: f00780eea4c0347a035cac094ebb66e98ba68d29
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: log2timeline/plaso
snapshot_id: cd72dd407d6c5627506c14f58cb8f6a6926aa808
revision_id: d6022f8cfebfddf2d08ab2d300a41b61f3349933
branch_name: refs/heads/main
visit_date: 2023-09-02T08:43:48.241198
revision_date: 2023-08-19T07:28:12
committer_date: 2023-08-19T07:28:12
github_id: 23,812,315
star_events_count: 1,506
fork_events_count: 421
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-04T08:24:53
gha_created_at: 2014-09-08T23:29:28
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,379
extension: py
filename: l2t_csv.py
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the log2timeline (l2t) CSV output module."""
import io
import unittest
from dfvfs.path import fake_path_spec
from plaso.containers import events
from plaso.lib import definitions
from plaso.output import l2t_csv
from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib
class L2TCSVFieldFormattingHelperTest(test_lib.OutputModuleTestCase):
"""L2T CSV field formatting helper."""
# pylint: disable=protected-access
_TEST_EVENTS = [
{'_parser_chain': 'test_parser',
'a_binary_field': b'binary',
'data_type': 'test:event',
'filename': 'log/syslog.1',
'hostname': 'ubuntu',
'my_number': 123,
'path_spec': fake_path_spec.FakePathSpec(
location='log/syslog.1'),
'some_additional_foo': True,
'text': (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN},
{'_parser_chain': 'test_parser',
'a_binary_field': b'binary',
'data_type': 'test:event',
'filename': 'log/syslog.1',
'hostname': 'ubuntu',
'my_number': 123,
'path_spec': fake_path_spec.FakePathSpec(
location='log/syslog.1'),
'some_additional_foo': True,
'text': (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-28 00:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN}]
def testFormatDate(self):
"""Tests the _FormatDate function."""
output_mediator = self._CreateOutputMediator()
formatting_helper = l2t_csv.L2TCSVFieldFormattingHelper()
# Test with event.date_time
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
date_string = formatting_helper._FormatDate(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(date_string, '06/27/2012')
output_mediator.SetTimeZone('Australia/Sydney')
date_string = formatting_helper._FormatDate(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(date_string, '06/28/2012')
output_mediator.SetTimeZone('UTC')
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
event.date_time._time_zone_offset = 600
date_string = formatting_helper._FormatDate(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(date_string, '06/27/2012')
# Test with event.is_local_time
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
event.timestamp += 600 * 60 * 1000000
event.date_time.is_local_time = True
date_string = formatting_helper._FormatDate(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(date_string, '06/28/2012')
# Test with event.timestamp
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
event.date_time = None
date_string = formatting_helper._FormatDate(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(date_string, '06/27/2012')
event.timestamp = -9223372036854775808
date_string = formatting_helper._FormatDate(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(date_string, '00/00/0000')
def testFormatExtraAttributes(self):
"""Tests the _FormatExtraAttributes function."""
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
formatting_helper = l2t_csv.L2TCSVFieldFormattingHelper()
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
extra_attributes_string = formatting_helper._FormatExtraAttributes(
output_mediator, event, event_data, event_data_stream)
expected_extra_attributes_string = (
'a_binary_field: binary; '
'my_number: 123; '
'some_additional_foo: True')
self.assertEqual(extra_attributes_string, expected_extra_attributes_string)
def testFormatParser(self):
"""Tests the _FormatParser function."""
output_mediator = self._CreateOutputMediator()
formatting_helper = l2t_csv.L2TCSVFieldFormattingHelper()
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
parser_string = formatting_helper._FormatParser(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(parser_string, 'test_parser')
def testFormatType(self):
"""Tests the _FormatType function."""
output_mediator = self._CreateOutputMediator()
formatting_helper = l2t_csv.L2TCSVFieldFormattingHelper()
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
type_string = formatting_helper._FormatType(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(type_string, 'Content Modification Time')
def testFormatVersion(self):
"""Tests the _FormatVersion function."""
output_mediator = self._CreateOutputMediator()
formatting_helper = l2t_csv.L2TCSVFieldFormattingHelper()
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
version_string = formatting_helper._FormatVersion(
output_mediator, event, event_data, event_data_stream)
self.assertEqual(version_string, '2')
class L2TCSVTest(test_lib.OutputModuleTestCase):
"""Tests for the L2tCSV output module."""
# pylint: disable=protected-access
_TEST_EVENTS = [
{'_parser_chain': 'test_parser',
'a_binary_field': b'binary',
'data_type': 'test:event',
'filename': 'log/syslog.1',
'hostname': 'ubuntu',
'my_number': 123,
'path_spec': fake_path_spec.FakePathSpec(
location='log/syslog.1'),
'some_additional_foo': True,
'text': (
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN}]
def testGetFieldValues(self):
"""Tests the _GetFieldValues function."""
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
output_module = l2t_csv.L2TCSVOutputModule()
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
event_tag = events.EventTag()
event_tag.AddLabels(['Malware', 'Printed'])
expected_field_values = {
'date': '06/27/2012',
'desc': ('Reporter <CRON> PID: 8442 (pam_unix(cron:session): session '
'closed for user root)'),
'extra': ('a_binary_field: binary; my_number: 123; '
'some_additional_foo: True'),
'filename': 'FAKE:log/syslog.1',
'format': 'test_parser',
'host': 'ubuntu',
'inode': '-',
'MACB': 'M...',
'notes': 'Malware Printed',
'short': ('Reporter <CRON> PID: 8442 (pam_unix(cron:session): '
'session closed for user root)'),
'source': 'FILE',
'sourcetype': 'Test log file',
'time': '18:17:01',
'timezone': 'UTC',
'type': 'Content Modification Time',
'user': '-',
'version': '2'}
field_values = output_module._GetFieldValues(
output_mediator, event, event_data, event_data_stream, event_tag)
self.assertEqual(field_values, expected_field_values)
def testWriteFieldValues(self):
"""Tests the _WriteFieldValues function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
output_module = l2t_csv.L2TCSVOutputModule()
output_module._file_object = test_file_object
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
event_tag = events.EventTag()
event_tag.AddLabels(['Malware', 'Printed'])
field_values = output_module._GetFieldValues(
output_mediator, event, event_data, event_data_stream, event_tag)
output_module._WriteFieldValues(output_mediator, field_values)
output_module._FlushSortedStringsHeap()
expected_event_body = (
'06/27/2012,18:17:01,UTC,M...,FILE,Test log file,Content Modification '
'Time,-,ubuntu,Reporter <CRON> PID: 8442 (pam_unix(cron:session): '
'session closed for user root),Reporter <CRON> PID: 8442 '
'(pam_unix(cron:session): session closed for user root),'
'2,FAKE:log/syslog.1,-,Malware Printed,test_parser,a_binary_field: '
'binary; my_number: 123; some_additional_foo: True\n')
event_body = test_file_object.getvalue()
self.assertEqual(event_body, expected_event_body)
# Ensure that the only commas returned are the 16 delimiters.
self.assertEqual(event_body.count(','), 16)
# TODO: add coverage for WriteEventMACBGroup
def testWriteHeader(self):
"""Tests the WriteHeader function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = l2t_csv.L2TCSVOutputModule()
output_module._file_object = test_file_object
output_module.WriteHeader(output_mediator)
expected_header = (
'date,time,timezone,MACB,source,sourcetype,type,user,host,short,desc,'
'version,filename,inode,notes,format,extra\n')
header = test_file_object.getvalue()
self.assertEqual(header, expected_header)
if __name__ == '__main__':
unittest.main()

blob_id: 52436e32db9a8e0405859058e0b6c6ebd2f152fa
directory_id: 689a78e08c957abc02ea5f89fb657b1f78f88b6e
path: /det3d/models/necks/rpn.py
content_id: 8d489399d959deb3a5e93c42f902bfc49dc4a6c4
detected_licenses: ["MIT", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: tianweiy/CenterPoint
snapshot_id: 2bb9a7def8d4bf87b66af2e3b671736eae6fa275
revision_id: d3a248fa56db2601860d576d5934d00fee9916eb
branch_name: refs/heads/master
visit_date: 2023-08-30T23:11:49.528882
revision_date: 2022-10-24T13:09:52
committer_date: 2022-10-24T13:09:52
github_id: 274,006,091
star_events_count: 1,692
fork_events_count: 476
gha_license_id: MIT
gha_event_created_at: 2023-05-06T10:30:06
gha_created_at: 2020-06-22T00:32:05
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,501
extension: py
filename: rpn.py
content:
import time
import numpy as np
import math
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import resnet
from torch.nn.modules.batchnorm import _BatchNorm
from det3d.torchie.cnn import constant_init, kaiming_init, xavier_init
from det3d.torchie.trainer import load_checkpoint
from det3d.models.utils import Empty, GroupNorm, Sequential
from det3d.models.utils import change_default_args
from .. import builder
from ..registry import NECKS
from ..utils import build_norm_layer
@NECKS.register_module
class RPN(nn.Module):
def __init__(
self,
layer_nums,
ds_layer_strides,
ds_num_filters,
us_layer_strides,
us_num_filters,
num_input_features,
norm_cfg=None,
name="rpn",
logger=None,
**kwargs
):
super(RPN, self).__init__()
self._layer_strides = ds_layer_strides
self._num_filters = ds_num_filters
self._layer_nums = layer_nums
self._upsample_strides = us_layer_strides
self._num_upsample_filters = us_num_filters
self._num_input_features = num_input_features
if norm_cfg is None:
norm_cfg = dict(type="BN", eps=1e-3, momentum=0.01)
self._norm_cfg = norm_cfg
assert len(self._layer_strides) == len(self._layer_nums)
assert len(self._num_filters) == len(self._layer_nums)
assert len(self._num_upsample_filters) == len(self._upsample_strides)
self._upsample_start_idx = len(self._layer_nums) - len(self._upsample_strides)
must_equal_list = []
for i in range(len(self._upsample_strides)):
# print(upsample_strides[i])
must_equal_list.append(
self._upsample_strides[i]
/ np.prod(self._layer_strides[: i + self._upsample_start_idx + 1])
)
for val in must_equal_list:
assert val == must_equal_list[0]
in_filters = [self._num_input_features, *self._num_filters[:-1]]
blocks = []
deblocks = []
for i, layer_num in enumerate(self._layer_nums):
block, num_out_filters = self._make_layer(
in_filters[i],
self._num_filters[i],
layer_num,
stride=self._layer_strides[i],
)
blocks.append(block)
if i - self._upsample_start_idx >= 0:
stride = (self._upsample_strides[i - self._upsample_start_idx])
if stride > 1:
deblock = Sequential(
nn.ConvTranspose2d(
num_out_filters,
self._num_upsample_filters[i - self._upsample_start_idx],
stride,
stride=stride,
bias=False,
),
build_norm_layer(
self._norm_cfg,
self._num_upsample_filters[i - self._upsample_start_idx],
)[1],
nn.ReLU(),
)
else:
stride = np.round(1 / stride).astype(np.int64)
deblock = Sequential(
nn.Conv2d(
num_out_filters,
self._num_upsample_filters[i - self._upsample_start_idx],
stride,
stride=stride,
bias=False,
),
build_norm_layer(
self._norm_cfg,
self._num_upsample_filters[i - self._upsample_start_idx],
)[1],
nn.ReLU(),
)
deblocks.append(deblock)
self.blocks = nn.ModuleList(blocks)
self.deblocks = nn.ModuleList(deblocks)
logger.info("Finish RPN Initialization")
@property
def downsample_factor(self):
factor = np.prod(self._layer_strides)
if len(self._upsample_strides) > 0:
factor /= self._upsample_strides[-1]
return factor
def _make_layer(self, inplanes, planes, num_blocks, stride=1):
block = Sequential(
nn.ZeroPad2d(1),
nn.Conv2d(inplanes, planes, 3, stride=stride, bias=False),
build_norm_layer(self._norm_cfg, planes)[1],
# nn.BatchNorm2d(planes, eps=1e-3, momentum=0.01),
nn.ReLU(),
)
for j in range(num_blocks):
block.add(nn.Conv2d(planes, planes, 3, padding=1, bias=False))
block.add(
build_norm_layer(self._norm_cfg, planes)[1],
# nn.BatchNorm2d(planes, eps=1e-3, momentum=0.01)
)
block.add(nn.ReLU())
return block, planes
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution="uniform")
def forward(self, x):
ups = []
for i in range(len(self.blocks)):
x = self.blocks[i](x)
if i - self._upsample_start_idx >= 0:
ups.append(self.deblocks[i - self._upsample_start_idx](x))
if len(ups) > 0:
x = torch.cat(ups, dim=1)
return x
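# A hypothetical instantiation of the RPN neck (the layer/filter/stride values below are
# illustrative placeholders, not taken from a shipped CenterPoint config):
def _example_rpn():
    import logging
    rpn = RPN(
        layer_nums=[5, 5],
        ds_layer_strides=[1, 2],
        ds_num_filters=[128, 256],
        us_layer_strides=[1, 2],
        us_num_filters=[256, 256],
        num_input_features=128,
        logger=logging.getLogger("RPN"),
    )
    # both stages (strides 1 and 2) are brought back to the input resolution and concatenated
    out = rpn(torch.zeros(1, 128, 128, 128))
    assert out.shape == (1, 512, 128, 128)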

blob_id: faa41df9b3d9ac5f9a5edcc676ee16dd22aad8c7
directory_id: c530897cb72b6943c7226b25824444cad5f3503b
path: /usaspending_api/recipient/v2/views/list_recipients.py
content_id: b4bae78336b83e269f6cd2e89d784ade67ae27f7
detected_licenses: ["CC0-1.0"]
license_type: permissive
repo_name: fedspendingtransparency/usaspending-api
snapshot_id: fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
revision_id: 38f920438697930ae3ac57bbcaae9034877d8fb7
branch_name: refs/heads/master
visit_date: 2023-09-01T22:00:36.633612
revision_date: 2023-08-29T18:39:18
committer_date: 2023-08-29T18:39:18
github_id: 65,394,827
star_events_count: 276
fork_events_count: 118
gha_license_id: CC0-1.0
gha_event_created_at: 2023-09-14T20:33:15
gha_created_at: 2016-08-10T15:39:45
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,739
extension: py
filename: list_recipients.py
content:
import copy
import logging
from django.db.models import F, Q
from django.utils.decorators import method_decorator
from rest_framework.response import Response
from rest_framework.views import APIView
from usaspending_api.common.api_versioning import deprecated
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.helpers.generic_helper import get_pagination_metadata
from usaspending_api.common.validator.pagination import PAGINATION
from usaspending_api.common.validator.tinyshield import TinyShield
from usaspending_api.common.validator.utils import update_model_in_list
from usaspending_api.recipient.models import RecipientProfile
from usaspending_api.recipient.v2.lookups import AWARD_TYPES, SPECIAL_CASES
logger = logging.getLogger(__name__)
award_types = list(AWARD_TYPES.keys()) + ["all"]
RECIPIENT_MODELS = [
{"name": "keyword", "key": "keyword", "type": "text", "text_type": "search"},
{"name": "award_type", "key": "award_type", "type": "enum", "enum_values": award_types, "default": "all"},
]
def build_recipient_identifier_base_query(filters):
qs_filter = Q()
if "keyword" in filters:
qs_filter |= Q(recipient_name__contains=filters["keyword"].upper())
qs_filter |= Q(recipient_unique_id__contains=filters["keyword"]) | Q(uei__contains=filters["keyword"])
if filters["award_type"] != "all":
qs_filter &= Q(award_types__overlap=[AWARD_TYPES[filters["award_type"]]["filter"]])
return qs_filter
def get_recipients(filters={}, count=None):
lower_limit = (filters["page"] - 1) * filters["limit"]
upper_limit = filters["page"] * filters["limit"]
amount_column = "last_12_months"
if filters["award_type"] != "all":
amount_column = AWARD_TYPES[filters["award_type"]]["amount"]
qs_filter = build_recipient_identifier_base_query(filters)
queryset = (
RecipientProfile.objects.filter(qs_filter)
.values("recipient_level", "recipient_hash", "recipient_unique_id", "recipient_name", amount_column, "uei")
.exclude(recipient_name__in=SPECIAL_CASES)
)
api_to_db_mapper = {
"amount": amount_column,
"duns": "recipient_unique_id",
"uei": "uei",
"name": "recipient_name",
}
# Nulls Last isn't enabled for the amount sort because it prevents queries sorted by amount columns DESC
# from using an index on those columns, even though they cannot contain nulls
nulls_last = filters["sort"] in ["name", "duns"]
if filters["order"] == "desc":
queryset = queryset.order_by(F(api_to_db_mapper[filters["sort"]]).desc(nulls_last=nulls_last))
else:
queryset = queryset.order_by(F(api_to_db_mapper[filters["sort"]]).asc(nulls_last=nulls_last))
if count is None:
count = get_recipient_count(filters=filters)
page_metadata = get_pagination_metadata(count, filters["limit"], filters["page"])
results = [
{
"id": "{}-{}".format(row["recipient_hash"], row["recipient_level"]),
"duns": row["recipient_unique_id"],
"uei": row["uei"],
"name": row["recipient_name"],
"recipient_level": row["recipient_level"],
"amount": row[amount_column],
}
for row in queryset[lower_limit:upper_limit]
]
return results, page_metadata
def get_recipient_count(filters={}):
qs_filter = build_recipient_identifier_base_query(filters)
return RecipientProfile.objects.filter(qs_filter).exclude(recipient_name__in=SPECIAL_CASES).count()
class RecipientCount(APIView):
"""
This route takes a single keyword filter and award_type, and returns a count of matching recipients
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/recipient/count.md"
cache_key_whitelist = ["keyword", "award_type"]
@cache_response()
def post(self, request):
validated_payload = TinyShield(RECIPIENT_MODELS).block(request.data)
return Response({"count": get_recipient_count(validated_payload)})
class ListRecipients(APIView):
"""
This route takes a single keyword filter (and pagination filters), and returns a list of recipients
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/recipient.md"
def request_count(self, filters={}):
response = RecipientCount.as_view()(request=self.request._request).data
return response["count"]
@cache_response()
def post(self, request):
models = copy.deepcopy(RECIPIENT_MODELS)
models.extend(copy.deepcopy(PAGINATION)) # page, limit, sort, order
        # Raise the pagination "limit" cap to 1000; the default limit itself is overridden to 50 below
for model in models:
if model["name"] == "limit":
model["max"] = 1000
new_sort = {"type": "enum", "enum_values": ["name", "uei", "duns", "amount"], "default": "amount"}
models = update_model_in_list(models, "sort", new_sort)
models = update_model_in_list(models, "limit", {"default": 50})
validated_payload = TinyShield(models).block(request.data)
count = self.request_count(validated_payload)
results, page_metadata = get_recipients(filters=validated_payload, count=count)
return Response({"page_metadata": page_metadata, "results": results})
@method_decorator(deprecated, name="post")
class ListRecipientsByDuns(ListRecipients):
"""
<em>Deprecated: Please see <a href="../">this endpoint</a> instead.</em>
This route takes a single keyword filter (and pagination filters), and returns a list of recipients
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/recipient/duns.md"
def __init__(self):
super().__init__()
|
a227fdcab03fdfb79e821f94a035ca7b058bee14
|
6bbbb8237c93f9b1f302010a65d6ecb6f286f23b
|
/websauna/tests/utils/test_utils_crypt.py
|
99e024122e47ae9119d609d2f3ad8006f4cbe844
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
websauna/websauna
|
ea60d5a6aef59b3725bace0d188dacea72574e63
|
a57de54fb8a3fae859f24f373f0292e1e4b3c344
|
refs/heads/master
| 2023-08-07T08:16:51.664340
| 2020-06-06T19:28:18
| 2020-06-06T19:28:18
| 49,773,298
| 294
| 74
|
NOASSERTION
| 2020-12-30T01:48:27
| 2016-01-16T12:55:54
|
Python
|
UTF-8
|
Python
| false
| false
| 729
|
py
|
test_utils_crypt.py
|
"""Test utils.crypt."""
# Standard Library
import string
import pytest
# Websauna
from websauna.utils import crypt
test_data = (
(32, None, crypt._default),
(32, crypt._default, crypt._default),
(40, string.digits, string.digits),
)
@pytest.mark.parametrize('length,letters,pool', test_data)
def test_generate_random_string(length, letters, pool):
"""Test crypt.generate_random_string.
Test if length and letters params are respected.
"""
func = crypt.generate_random_string
params = {
'length': length,
}
if letters:
params['letters'] = letters
result = func(**params)
assert len(result) == length
assert len([c for c in result if c in pool]) == length
|
d7f38f0fd98fd6ca4cdb4c98608d504b4ce8beca
|
b095173b2dbc77c8ad61c42403258c76169b7a63
|
/src/sagemaker/jumpstart/utils.py
|
f77e1ae2317fb05047cf8d7a34957ae864c567fe
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-python-sdk
|
666665e717cfb76698ba3ea7563b45344634264d
|
8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85
|
refs/heads/master
| 2023-09-04T01:00:20.663626
| 2023-08-31T15:29:19
| 2023-08-31T15:29:19
| 110,621,895
| 2,050
| 1,255
|
Apache-2.0
| 2023-09-14T17:37:15
| 2017-11-14T01:03:33
|
Python
|
UTF-8
|
Python
| false
| false
| 24,573
|
py
|
utils.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains utilities related to SageMaker JumpStart."""
from __future__ import absolute_import
import logging
import os
from typing import Any, Dict, List, Optional
from urllib.parse import urlparse
from packaging.version import Version
import sagemaker
from sagemaker.config.config_schema import (
MODEL_ENABLE_NETWORK_ISOLATION_PATH,
MODEL_EXECUTION_ROLE_ARN_PATH,
TRAINING_JOB_ENABLE_NETWORK_ISOLATION_PATH,
TRAINING_JOB_INTER_CONTAINER_ENCRYPTION_PATH,
TRAINING_JOB_ROLE_ARN_PATH,
)
from sagemaker.jumpstart import constants, enums
from sagemaker.jumpstart import accessors
from sagemaker.s3 import parse_s3_url
from sagemaker.jumpstart.exceptions import (
DeprecatedJumpStartModelError,
VulnerableJumpStartModelError,
)
from sagemaker.jumpstart.types import (
JumpStartModelHeader,
JumpStartModelSpecs,
JumpStartVersionedModelId,
)
from sagemaker.session import Session
from sagemaker.config import load_sagemaker_config
from sagemaker.utils import resolve_value_from_config
from sagemaker.workflow import is_pipeline_variable
def get_jumpstart_launched_regions_message() -> str:
"""Returns formatted string indicating where JumpStart is launched."""
if len(constants.JUMPSTART_REGION_NAME_SET) == 0:
return "JumpStart is not available in any region."
if len(constants.JUMPSTART_REGION_NAME_SET) == 1:
region = list(constants.JUMPSTART_REGION_NAME_SET)[0]
return f"JumpStart is available in {region} region."
sorted_regions = sorted(list(constants.JUMPSTART_REGION_NAME_SET))
if len(constants.JUMPSTART_REGION_NAME_SET) == 2:
return f"JumpStart is available in {sorted_regions[0]} and {sorted_regions[1]} regions."
formatted_launched_regions_list = []
for i, region in enumerate(sorted_regions):
region_prefix = "" if i < len(sorted_regions) - 1 else "and "
formatted_launched_regions_list.append(region_prefix + region)
formatted_launched_regions_str = ", ".join(formatted_launched_regions_list)
return f"JumpStart is available in {formatted_launched_regions_str} regions."
def get_jumpstart_content_bucket(
region: str = constants.JUMPSTART_DEFAULT_REGION_NAME,
) -> str:
"""Returns regionalized content bucket name for JumpStart.
Raises:
RuntimeError: If JumpStart is not launched in ``region``.
"""
old_content_bucket: Optional[
str
] = accessors.JumpStartModelsAccessor.get_jumpstart_content_bucket()
info_logs: List[str] = []
bucket_to_return: Optional[str] = None
if (
constants.ENV_VARIABLE_JUMPSTART_CONTENT_BUCKET_OVERRIDE in os.environ
and len(os.environ[constants.ENV_VARIABLE_JUMPSTART_CONTENT_BUCKET_OVERRIDE]) > 0
):
bucket_to_return = os.environ[constants.ENV_VARIABLE_JUMPSTART_CONTENT_BUCKET_OVERRIDE]
info_logs.append(f"Using JumpStart bucket override: '{bucket_to_return}'")
else:
try:
bucket_to_return = constants.JUMPSTART_REGION_NAME_TO_LAUNCHED_REGION_DICT[
region
].content_bucket
except KeyError:
formatted_launched_regions_str = get_jumpstart_launched_regions_message()
raise ValueError(
f"Unable to get content bucket for JumpStart in {region} region. "
f"{formatted_launched_regions_str}"
)
accessors.JumpStartModelsAccessor.set_jumpstart_content_bucket(bucket_to_return)
if bucket_to_return != old_content_bucket:
accessors.JumpStartModelsAccessor.reset_cache()
for info_log in info_logs:
constants.JUMPSTART_LOGGER.info(info_log)
return bucket_to_return
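# Editor's sketch (not from the original source): exercising the environment-variable
# override that is checked above; the bucket name is hypothetical.
#
#     os.environ[constants.ENV_VARIABLE_JUMPSTART_CONTENT_BUCKET_OVERRIDE] = "my-custom-jumpstart-bucket"
#     bucket = get_jumpstart_content_bucket(region="us-west-2")  # -> "my-custom-jumpstart-bucket"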
def get_formatted_manifest(
manifest: List[Dict],
) -> Dict[JumpStartVersionedModelId, JumpStartModelHeader]:
"""Returns formatted manifest dictionary from raw manifest.
Keys are JumpStartVersionedModelId objects, values are
``JumpStartModelHeader`` objects.
"""
manifest_dict = {}
for header in manifest:
header_obj = JumpStartModelHeader(header)
manifest_dict[
JumpStartVersionedModelId(header_obj.model_id, header_obj.version)
] = header_obj
return manifest_dict
def get_sagemaker_version() -> str:
"""Returns sagemaker library version.
If the sagemaker library version has not been set, this function
calls ``parse_sagemaker_version`` to retrieve the version and set
the constant.
"""
if accessors.SageMakerSettings.get_sagemaker_version() == "":
accessors.SageMakerSettings.set_sagemaker_version(parse_sagemaker_version())
return accessors.SageMakerSettings.get_sagemaker_version()
def parse_sagemaker_version() -> str:
"""Returns sagemaker library version. This should only be called once.
Function reads ``__version__`` variable in ``sagemaker`` module.
In order to maintain compatibility with the ``packaging.version``
library, versions with fewer than 2, or more than 3, periods are rejected.
All versions that cannot be parsed with ``packaging.version`` are also
rejected.
Raises:
RuntimeError: If the SageMaker version is not readable. An exception is also raised if
the version cannot be parsed by ``packaging.version``.
"""
version = sagemaker.__version__
parsed_version = None
num_periods = version.count(".")
if num_periods == 2:
parsed_version = version
elif num_periods == 3:
trailing_period_index = version.rfind(".")
parsed_version = version[:trailing_period_index]
else:
raise RuntimeError(f"Bad value for SageMaker version: {sagemaker.__version__}")
Version(parsed_version)
return parsed_version
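# Editor's note (illustrative, not part of the original file): the trailing-period
# handling above trims a four-component version down to three components before
# validation, for example:
#
#     "2.75.1"      -> "2.75.1"      (two periods, kept as-is)
#     "2.75.1.dev0" -> "2.75.1"      (three periods, trailing component dropped)
#     "2.75"        -> RuntimeError  (fewer than two periods)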
def is_jumpstart_model_input(model_id: Optional[str], version: Optional[str]) -> bool:
"""Determines if `model_id` and `version` input are for JumpStart.
This method returns True if both arguments are not None, false if both arguments
are None, and raises an exception if one argument is None but the other isn't.
Args:
model_id (str): Optional. Model ID of the JumpStart model.
version (str): Optional. Version of the JumpStart model.
Raises:
ValueError: If only one of the two arguments is None.
"""
if model_id is not None or version is not None:
if model_id is None or version is None:
raise ValueError(
"Must specify JumpStart `model_id` and `model_version` when getting specs for "
"JumpStart models."
)
return True
return False
def is_jumpstart_model_uri(uri: Optional[str]) -> bool:
"""Returns True if URI corresponds to a JumpStart-hosted model.
Args:
uri (Optional[str]): uri for inference/training job.
"""
bucket = None
if urlparse(uri).scheme == "s3":
bucket, _ = parse_s3_url(uri)
return bucket in constants.JUMPSTART_BUCKET_NAME_SET
def tag_key_in_array(tag_key: str, tag_array: List[Dict[str, str]]) -> bool:
"""Returns True if ``tag_key`` is in the ``tag_array``.
Args:
tag_key (str): the tag key to check if it's already in the ``tag_array``.
tag_array (List[Dict[str, str]]): array of tags to check for ``tag_key``.
"""
for tag in tag_array:
if tag_key == tag["Key"]:
return True
return False
def get_tag_value(tag_key: str, tag_array: List[Dict[str, str]]) -> str:
"""Return the value of a tag whose key matches the given ``tag_key``.
Args:
tag_key (str): AWS tag for which to search.
tag_array (List[Dict[str, str]]): List of AWS tags, each formatted as dicts.
Raises:
KeyError: If the number of matches for the ``tag_key`` is not equal to 1.
"""
tag_values = [tag["Value"] for tag in tag_array if tag_key == tag["Key"]]
if len(tag_values) != 1:
raise KeyError(
f"Cannot get value of tag for tag key '{tag_key}' -- found {len(tag_values)} "
f"number of matches in the tag list."
)
return tag_values[0]
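# Editor's sketch of the tag shape these helpers expect (values are hypothetical):
#
#     tags = [{"Key": "project", "Value": "demo"}, {"Key": "owner", "Value": "ml-team"}]
#     tag_key_in_array("project", tags)   # True
#     get_tag_value("owner", tags)        # "ml-team"
#     get_tag_value("missing", tags)      # raises KeyError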
def add_single_jumpstart_tag(
uri: str, tag_key: enums.JumpStartTag, curr_tags: Optional[List[Dict[str, str]]]
) -> Optional[List]:
"""Adds ``tag_key`` to ``curr_tags`` if ``uri`` corresponds to a JumpStart model.
Args:
uri (str): URI which may correspond to a JumpStart model.
tag_key (enums.JumpStartTag): Custom tag to apply to current tags if the URI
corresponds to a JumpStart model.
curr_tags (Optional[List]): Current tags associated with ``Estimator`` or ``Model``.
"""
if is_jumpstart_model_uri(uri):
if curr_tags is None:
curr_tags = []
if not tag_key_in_array(tag_key, curr_tags):
curr_tags.append(
{
"Key": tag_key,
"Value": uri,
}
)
return curr_tags
def get_jumpstart_base_name_if_jumpstart_model(
*uris: Optional[str],
) -> Optional[str]:
"""Return default JumpStart base name if a URI belongs to JumpStart.
If no URIs belong to JumpStart, return None.
Args:
*uris (Optional[str]): URI to test for association with JumpStart.
"""
for uri in uris:
if is_jumpstart_model_uri(uri):
return constants.JUMPSTART_RESOURCE_BASE_NAME
return None
def add_jumpstart_tags(
tags: Optional[List[Dict[str, str]]] = None,
inference_model_uri: Optional[str] = None,
inference_script_uri: Optional[str] = None,
training_model_uri: Optional[str] = None,
training_script_uri: Optional[str] = None,
) -> Optional[List[Dict[str, str]]]:
"""Add custom tags to JumpStart models, return the updated tags.
No-op if this is not a JumpStart model related resource.
Args:
tags (Optional[List[Dict[str,str]]): Current tags for JumpStart inference
or training job. (Default: None).
inference_model_uri (Optional[str]): S3 URI for inference model artifact.
(Default: None).
inference_script_uri (Optional[str]): S3 URI for inference script tarball.
(Default: None).
training_model_uri (Optional[str]): S3 URI for training model artifact.
(Default: None).
training_script_uri (Optional[str]): S3 URI for training script tarball.
(Default: None).
"""
warn_msg = (
"The URI (%s) is a pipeline variable which is only interpreted at execution time. "
"As a result, the JumpStart resources will not be tagged."
)
if inference_model_uri:
if is_pipeline_variable(inference_model_uri):
logging.warning(warn_msg, "inference_model_uri")
else:
tags = add_single_jumpstart_tag(
inference_model_uri, enums.JumpStartTag.INFERENCE_MODEL_URI, tags
)
if inference_script_uri:
if is_pipeline_variable(inference_script_uri):
logging.warning(warn_msg, "inference_script_uri")
else:
tags = add_single_jumpstart_tag(
inference_script_uri, enums.JumpStartTag.INFERENCE_SCRIPT_URI, tags
)
if training_model_uri:
if is_pipeline_variable(training_model_uri):
logging.warning(warn_msg, "training_model_uri")
else:
tags = add_single_jumpstart_tag(
training_model_uri, enums.JumpStartTag.TRAINING_MODEL_URI, tags
)
if training_script_uri:
if is_pipeline_variable(training_script_uri):
logging.warning(warn_msg, "training_script_uri")
else:
tags = add_single_jumpstart_tag(
training_script_uri, enums.JumpStartTag.TRAINING_SCRIPT_URI, tags
)
return tags
def update_inference_tags_with_jumpstart_training_tags(
inference_tags: Optional[List[Dict[str, str]]], training_tags: Optional[List[Dict[str, str]]]
) -> Optional[List[Dict[str, str]]]:
"""Updates the tags for the ``sagemaker.model.Model.deploy`` command with any JumpStart tags.
Args:
        inference_tags (Optional[List[Dict[str, str]]]): Custom tags to apply to inference job.
training_tags (Optional[List[Dict[str, str]]]): Tags from training job.
"""
if training_tags:
for tag_key in enums.JumpStartTag:
if tag_key_in_array(tag_key, training_tags):
tag_value = get_tag_value(tag_key, training_tags)
if inference_tags is None:
inference_tags = []
if not tag_key_in_array(tag_key, inference_tags):
inference_tags.append({"Key": tag_key, "Value": tag_value})
return inference_tags
def emit_logs_based_on_model_specs(model_specs: JumpStartModelSpecs, region: str) -> None:
"""Emits logs based on model specs and region."""
if model_specs.hosting_eula_key:
constants.JUMPSTART_LOGGER.info(
"Model '%s' requires accepting end-user license agreement (EULA). "
"See https://%s.s3.%s.amazonaws.com%s/%s for terms of use.",
model_specs.model_id,
get_jumpstart_content_bucket(region=region),
region,
".cn" if region.startswith("cn-") else "",
model_specs.hosting_eula_key,
)
if model_specs.deprecated:
deprecated_message = model_specs.deprecated_message or (
"Using deprecated JumpStart model "
f"'{model_specs.model_id}' and version '{model_specs.version}'."
)
constants.JUMPSTART_LOGGER.warning(deprecated_message)
if model_specs.deprecate_warn_message:
constants.JUMPSTART_LOGGER.warning(model_specs.deprecate_warn_message)
if model_specs.inference_vulnerable or model_specs.training_vulnerable:
constants.JUMPSTART_LOGGER.warning(
"Using vulnerable JumpStart model '%s' and version '%s'.",
model_specs.model_id,
model_specs.version,
)
def verify_model_region_and_return_specs(
model_id: Optional[str],
version: Optional[str],
scope: Optional[str],
region: str,
tolerate_vulnerable_model: bool = False,
tolerate_deprecated_model: bool = False,
sagemaker_session: Session = constants.DEFAULT_JUMPSTART_SAGEMAKER_SESSION,
) -> JumpStartModelSpecs:
"""Verifies that an acceptable model_id, version, scope, and region combination is provided.
Args:
        model_id (Optional[str]): model ID of the JumpStart model to verify and
            obtain specs for.
        version (Optional[str]): version of the JumpStart model to verify and
            obtain specs for.
        scope (Optional[str]): scope of the JumpStart model to verify.
        region (Optional[str]): region of the JumpStart model to verify and
            obtain specs for.
tolerate_vulnerable_model (bool): True if vulnerable versions of model
specifications should be tolerated (exception not raised). If False, raises an
exception if the script used by this version of the model has dependencies with known
security vulnerabilities. (Default: False).
tolerate_deprecated_model (bool): True if deprecated models should be tolerated
(exception not raised). False if these models should raise an exception.
(Default: False).
sagemaker_session (sagemaker.session.Session): A SageMaker Session
object, used for SageMaker interactions. If not
specified, one is created using the default AWS configuration
chain. (Default: sagemaker.jumpstart.constants.DEFAULT_JUMPSTART_SAGEMAKER_SESSION).
Raises:
NotImplementedError: If the scope is not supported.
ValueError: If the combination of arguments specified is not supported.
VulnerableJumpStartModelError: If any of the dependencies required by the script have
known security vulnerabilities.
DeprecatedJumpStartModelError: If the version of the model is deprecated.
"""
if scope is None:
raise ValueError(
"Must specify `model_scope` argument to retrieve model "
"artifact uri for JumpStart models."
)
if scope not in constants.SUPPORTED_JUMPSTART_SCOPES:
raise NotImplementedError(
"JumpStart models only support scopes: "
f"{', '.join(constants.SUPPORTED_JUMPSTART_SCOPES)}."
)
model_specs = accessors.JumpStartModelsAccessor.get_model_specs( # type: ignore
region=region,
model_id=model_id,
version=version,
s3_client=sagemaker_session.s3_client,
)
if (
scope == constants.JumpStartScriptScope.TRAINING.value
and not model_specs.training_supported
):
raise ValueError(
f"JumpStart model ID '{model_id}' and version '{version}' " "does not support training."
)
if model_specs.deprecated:
if not tolerate_deprecated_model:
raise DeprecatedJumpStartModelError(
model_id=model_id, version=version, message=model_specs.deprecated_message
)
if scope == constants.JumpStartScriptScope.INFERENCE.value and model_specs.inference_vulnerable:
if not tolerate_vulnerable_model:
raise VulnerableJumpStartModelError(
model_id=model_id,
version=version,
vulnerabilities=model_specs.inference_vulnerabilities,
scope=constants.JumpStartScriptScope.INFERENCE,
)
if scope == constants.JumpStartScriptScope.TRAINING.value and model_specs.training_vulnerable:
if not tolerate_vulnerable_model:
raise VulnerableJumpStartModelError(
model_id=model_id,
version=version,
vulnerabilities=model_specs.training_vulnerabilities,
scope=constants.JumpStartScriptScope.TRAINING,
)
return model_specs
def update_dict_if_key_not_present(
dict_to_update: dict, key_to_add: Any, value_to_add: Any
) -> dict:
"""If a key is not present in the dict, add the new (key, value) pair, and return dict."""
if key_to_add not in dict_to_update:
dict_to_update[key_to_add] = value_to_add
return dict_to_update
def resolve_model_sagemaker_config_field(
field_name: str,
field_val: Optional[Any],
sagemaker_session: Session,
default_value: Optional[str] = None,
) -> Any:
"""Given a field name, checks if there is a sagemaker config value to set.
For the role field, which is customer-supplied, we allow ``field_val`` to take precedence
over sagemaker config values. For all other fields, sagemaker config values take precedence
over the JumpStart default fields.
"""
    # If sagemaker_session is None, load the sagemaker config via load_sagemaker_config()
    # so the value for the given field_name parameter can still be resolved from config
_sagemaker_config = load_sagemaker_config() if (sagemaker_session is None) else None
# We allow customers to define a role which takes precedence
# over the one defined in sagemaker config
if field_name == "role":
return resolve_value_from_config(
direct_input=field_val,
config_path=MODEL_EXECUTION_ROLE_ARN_PATH,
default_value=default_value or sagemaker_session.get_caller_identity_arn(),
sagemaker_session=sagemaker_session,
sagemaker_config=_sagemaker_config,
)
# JumpStart Models have certain default field values. We want
# sagemaker config values to take priority over the model-specific defaults.
if field_name == "enable_network_isolation":
resolved_val = resolve_value_from_config(
direct_input=None,
config_path=MODEL_ENABLE_NETWORK_ISOLATION_PATH,
sagemaker_session=sagemaker_session,
default_value=default_value,
sagemaker_config=_sagemaker_config,
)
return resolved_val if resolved_val is not None else field_val
# field is not covered by sagemaker config so return as is
return field_val
def resolve_estimator_sagemaker_config_field(
field_name: str,
field_val: Optional[Any],
sagemaker_session: Session,
default_value: Optional[str] = None,
) -> Any:
"""Given a field name, checks if there is a sagemaker config value to set.
For the role field, which is customer-supplied, we allow ``field_val`` to take precedence
over sagemaker config values. For all other fields, sagemaker config values take precedence
over the JumpStart default fields.
"""
# Workaround for config injection if sagemaker_session is None, since in
# that case sagemaker_session will not be initialized until
# `_init_sagemaker_session_if_does_not_exist` is called later
_sagemaker_config = load_sagemaker_config() if (sagemaker_session is None) else None
# We allow customers to define a role which takes precedence
# over the one defined in sagemaker config
if field_name == "role":
return resolve_value_from_config(
direct_input=field_val,
config_path=TRAINING_JOB_ROLE_ARN_PATH,
default_value=default_value or sagemaker_session.get_caller_identity_arn(),
sagemaker_session=sagemaker_session,
sagemaker_config=_sagemaker_config,
)
# JumpStart Estimators have certain default field values. We want
# sagemaker config values to take priority over the model-specific defaults.
if field_name == "enable_network_isolation":
resolved_val = resolve_value_from_config(
direct_input=None,
config_path=TRAINING_JOB_ENABLE_NETWORK_ISOLATION_PATH,
sagemaker_session=sagemaker_session,
default_value=default_value,
sagemaker_config=_sagemaker_config,
)
return resolved_val if resolved_val is not None else field_val
if field_name == "encrypt_inter_container_traffic":
resolved_val = resolve_value_from_config(
direct_input=None,
config_path=TRAINING_JOB_INTER_CONTAINER_ENCRYPTION_PATH,
sagemaker_session=sagemaker_session,
default_value=default_value,
sagemaker_config=_sagemaker_config,
)
return resolved_val if resolved_val is not None else field_val
# field is not covered by sagemaker config so return as is
return field_val
def is_valid_model_id(
model_id: Optional[str],
region: Optional[str] = None,
model_version: Optional[str] = None,
script: enums.JumpStartScriptScope = enums.JumpStartScriptScope.INFERENCE,
sagemaker_session: Optional[Session] = constants.DEFAULT_JUMPSTART_SAGEMAKER_SESSION,
) -> bool:
"""Returns True if the model ID is supported for the given script.
Raises:
ValueError: If the script is not supported by JumpStart.
"""
if model_id in {None, ""}:
return False
if not isinstance(model_id, str):
return False
s3_client = sagemaker_session.s3_client if sagemaker_session else None
region = region or constants.JUMPSTART_DEFAULT_REGION_NAME
model_version = model_version or "*"
models_manifest_list = accessors.JumpStartModelsAccessor._get_manifest(
region=region, s3_client=s3_client
)
model_id_set = {model.model_id for model in models_manifest_list}
if script == enums.JumpStartScriptScope.INFERENCE:
return model_id in model_id_set
if script == enums.JumpStartScriptScope.TRAINING:
return (
model_id in model_id_set
and accessors.JumpStartModelsAccessor.get_model_specs(
region=region,
model_id=model_id,
version=model_version,
s3_client=s3_client,
).training_supported
)
raise ValueError(f"Unsupported script: {script}")
|
82002e8cc74e3c315e43be7da1aafa8362243b20
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/build-support/flake8/await_in_loop.py
|
fac47f3bbb86d8aab4f89453372e73530c556b57
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899
| 2023-09-01T19:52:09
| 2023-09-01T19:52:09
| 7,209,075
| 2,708
| 593
|
Apache-2.0
| 2023-09-14T19:33:33
| 2012-12-17T17:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,695
|
py
|
await_in_loop.py
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Disallow 'await' in a loop."""
from __future__ import annotations
import ast
from contextlib import contextmanager
from pathlib import PurePath
from typing import Iterator, Sequence
def check_for_await_in_loop(tree: ast.AST, filename: str) -> Iterator[tuple[int, int, str, None]]:
path = PurePath(filename)
if (
not filename.startswith("src/python")
or path.stem.startswith("test_")
or path.stem.endswith("_test")
):
return
violations: list[tuple[int, int, str, None]] = []
class Visitor(ast.NodeVisitor):
def __init__(self):
# this isn't entirely correct: function/class definitions within a loop might have
# `await`s in them, but aren't _necessarily_ a problem (see example below).
#
# tasks = []
# for i in range(10):
# async def foo(i=i):
# await bar(i)
# tasks.append(foo())
# asyncio.gather(tasks)
self._in_loop = False
@contextmanager
def in_loop(self) -> Iterator[None]:
old = self._in_loop
self._in_loop = True
try:
yield
finally:
self._in_loop = old
def traverse(self, node: ast.AST | Sequence[ast.AST]):
if isinstance(node, ast.AST):
self.visit(node)
else:
for x in node:
self.visit(x)
def visit_for(self, node: ast.For | ast.AsyncFor):
"""Example::
[async] for MULTIPLE in await ONCE:
await MULTIPLE
else:
await ONCE
"""
self.visit(node.iter)
self.traverse(node.orelse)
with self.in_loop():
self.visit(node.target)
self.traverse(node.body)
visit_For = visit_AsyncFor = visit_for
def visit_While(self, node: ast.While):
"""Example:
while await MULTIPLE:
await MULTIPLE
"""
with self.in_loop():
self.generic_visit(node)
def visit_comp(self, node: ast.DictComp | ast.ListComp | ast.SetComp | ast.GeneratorExp):
"""Example::
[
await MULTIPLE
[async] for MULTIPLE in await ONCE
if MULTIPLE
for MULTIPLE in await MULTIPLE
]
"""
first_comp = node.generators[0]
self.visit(first_comp.iter)
with self.in_loop():
self.visit(first_comp.target)
for expr in first_comp.ifs:
self.visit(expr)
for other_comp in node.generators[1:]:
self.visit(other_comp)
if isinstance(node, ast.DictComp):
self.visit(node.key)
self.visit(node.value)
else:
self.visit(node.elt)
visit_ListComp = visit_GeneratorExp = visit_SetComp = visit_DictComp = visit_comp
def _await_that_could_be_multiget(self, node: ast.Await) -> bool:
"""Check for `await Get(...)` or `await MultiGet(...)` literally."""
value = node.value
# This checks for `await Get()` and `await MultiGet()` literally, because there's not
# currently MultiGet support for rule_helpers (i.e. `[await some_rule_helper(x) for x in
# ...]` cannot become `await MultiGet([rule_helper(x) for x in ...])` ). Once that's
# supported, this could flip to default to True, except for `await Effect`.
return (
isinstance(value, ast.Call)
and isinstance(value.func, ast.Name)
and value.func.id in ("Get", "MultiGet")
)
def visit_Await(self, node: ast.Await):
if self._in_loop and self._await_that_could_be_multiget(node):
violations.append(
(
node.lineno,
node.col_offset,
"PNT30 `await` in a loop may be a performance hazard: prefer concurrent requests via MultiGet, or add `# noqa: PNT30: <explanation>` if this is required",
None,
)
)
Visitor().visit(tree)
yield from violations
setattr(check_for_await_in_loop, "name", __name__)
setattr(check_for_await_in_loop, "version", "0.0.0")
|
1b64b6eda9614116a9d50c21f1338f7758c61940
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/egs2/tedx_spanish_openslr67/asr1/local/split_data.py
|
6692a4e7a70146bd2b32bd43a0e1eb4b9b81ad9d
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
split_data.py
|
# create train, dev, test split 90/5/5
# speakers do not overlap between sets; the dev and test sets each contain 4 male and 4 female speakers
# 126 (94M, 32F), 8 (4M, 4F), 8 (4M, 4F)
import glob
import os
import random
m_speakers = []
f_speakers = []
for f in glob.glob(os.environ["TEDX_SPANISH"] + "/tedx_spanish_corpus/speech/*.wav"):
spkr = "_".join(f.split("/")[-1].split("_")[0:3])
if "M" in spkr and spkr not in m_speakers:
m_speakers.append(spkr)
elif "F" in spkr and spkr not in f_speakers:
f_speakers.append(spkr)
train = open("local/train.txt", "w")
dev = open("local/dev.txt", "w")
test = open("local/test.txt", "w")
random.shuffle(m_speakers)
random.shuffle(f_speakers)
train_list = m_speakers[0:94] + f_speakers[0:32]
dev_list = m_speakers[94:98] + f_speakers[32:36]
test_list = m_speakers[98:102] + f_speakers[36:40]
random.shuffle(train_list)
random.shuffle(dev_list)
random.shuffle(test_list)
for i in range(126):
train.write(train_list[i] + "\n")
for i in range(8):
dev.write(dev_list[i] + "\n")
test.write(test_list[i] + "\n")
|
5e552c28039d402a312b58e47262514fdc80a9fc
|
0e48483bd01170a8f4ca864de9b75bad50f55a94
|
/tests/integration/backward_compatible/threading_test.py
|
00dce33091c2111f2da50d0e262add73c134c2eb
|
[
"Apache-2.0"
] |
permissive
|
hazelcast/hazelcast-python-client
|
ed01e55165961ecb148a498ae3dd36503b64e93e
|
1723cc040c328ebc0d5ab44396c2f55bad7d9075
|
refs/heads/master
| 2023-09-04T02:29:29.753280
| 2023-08-14T09:13:03
| 2023-08-14T09:13:03
| 47,321,016
| 112
| 73
|
Apache-2.0
| 2023-09-11T17:59:53
| 2015-12-03T09:14:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
threading_test.py
|
import sys
import threading
from random import choice
from unittest import skip
from tests.base import SingleMemberTestCase
from tests.util import random_string
class ThreadingTest(SingleMemberTestCase):
@classmethod
def configure_client(cls, config):
config["cluster_name"] = cls.cluster.id
return config
def setUp(self):
self.map = self.client.get_map(random_string()).blocking()
@skip
def test_operation_from_multiple_threads(self):
num_threads = 4
num_iterations = 5000
value_size = 1000
key_range = 50
timeout = 300
keys = list(range(0, key_range))
exceptions = []
value = "v" * value_size
def put_get_remove():
for i in range(0, num_iterations):
if i % 100 == 0:
self.logger.info("op %i", i)
try:
key = choice(keys)
self.map.lock(key)
self.map.put(key, value)
self.assertEqual(value, self.map.get(key))
self.assertEqual(value, self.map.remove(key))
self.map.unlock(key)
                except Exception:
                    self.logger.exception("Exception in thread")
                    exceptions.append((threading.current_thread().name, sys.exc_info()))
threads = [self.start_new_thread(put_get_remove) for _ in range(0, num_threads)]
for t in threads:
t.join(timeout)
            if t.is_alive():
                self.fail("thread %s did not finish in %s seconds" % (t.name, timeout))
if exceptions:
            name, exc_info = exceptions[0]
            self.logger.error("Exception in thread %s", name)
            raise exc_info[1]
|
77dda568e55db9cb761ec455bd9e028178fc8dc1
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/search/azure-search-documents/tests/async_tests/test_search_client_index_document_live_async.py
|
24b8f73f31035f88e055ff609f32f7cc4a020d62
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,927
|
py
|
test_search_client_index_document_live_async.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import time
from azure.core.exceptions import HttpResponseError
from azure.search.documents.aio import SearchClient
from devtools_testutils import AzureRecordedTestCase
from devtools_testutils.aio import recorded_by_proxy_async
from search_service_preparer import SearchEnvVarPreparer, search_decorator
TIME_TO_SLEEP = 3
class TestSearchClientDocumentsAsync(AzureRecordedTestCase):
@SearchEnvVarPreparer()
@search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json")
@recorded_by_proxy_async
async def test_search_client_index_document(self, endpoint, api_key, index_name):
client = SearchClient(endpoint, index_name, api_key, retry_backoff_factor=60)
doc_count = 10
async with client:
doc_count = await self._test_upload_documents_new(client, doc_count)
doc_count = await self._test_upload_documents_existing(client, doc_count)
doc_count = await self._test_delete_documents_existing(client, doc_count)
doc_count = await self._test_delete_documents_missing(client, doc_count)
doc_count = await self._test_merge_documents_existing(client, doc_count)
doc_count = await self._test_merge_documents_missing(client, doc_count)
doc_count = await self._test_merge_or_upload_documents(client, doc_count)
async def _test_upload_documents_new(self, client, doc_count):
docs = [
{"hotelId": "1000", "rating": 5, "rooms": [], "hotelName": "Azure Inn"},
{"hotelId": "1001", "rating": 4, "rooms": [], "hotelName": "Redmond Hotel"},
]
results = await client.upload_documents(docs)
assert len(results) == len(docs)
assert set(x.status_code for x in results) == {201}
doc_count += len(docs)
# There can be some lag before a document is searchable
if self.is_live:
time.sleep(TIME_TO_SLEEP)
assert await client.get_document_count() == doc_count
for doc in docs:
result = await client.get_document(key=doc["hotelId"])
assert result["hotelId"] == doc["hotelId"]
assert result["hotelName"] == doc["hotelName"]
assert result["rating"] == doc["rating"]
assert result["rooms"] == doc["rooms"]
return doc_count
async def _test_upload_documents_existing(self, client, doc_count):
# add one new and one existing
docs = [
{"hotelId": "1002", "rating": 5, "rooms": [], "hotelName": "Azure Inn"},
{"hotelId": "3", "rating": 4, "rooms": [], "hotelName": "Redmond Hotel"},
]
results = await client.upload_documents(docs)
assert len(results) == len(docs)
doc_count += 1
assert set(x.status_code for x in results) == {200, 201}
return doc_count
async def _test_delete_documents_existing(self, client, doc_count):
docs = [{"hotelId": "3"}, {"hotelId": "4"}]
results = await client.delete_documents(docs)
assert len(results) == len(docs)
assert set(x.status_code for x in results) == {200}
doc_count -= len(docs)
# There can be some lag before a document is searchable
if self.is_live:
time.sleep(TIME_TO_SLEEP)
assert await client.get_document_count() == doc_count
with pytest.raises(HttpResponseError):
await client.get_document(key="3")
with pytest.raises(HttpResponseError):
await client.get_document(key="4")
return doc_count
async def _test_delete_documents_missing(self, client, doc_count):
# delete one existing and one missing
docs = [{"hotelId": "1003"}, {"hotelId": "2"}]
results = await client.delete_documents(docs)
assert len(results) == len(docs)
assert set(x.status_code for x in results) == {200}
doc_count -= 1
# There can be some lag before a document is searchable
if self.is_live:
time.sleep(TIME_TO_SLEEP)
assert await client.get_document_count() == doc_count
with pytest.raises(HttpResponseError):
await client.get_document(key="1003")
with pytest.raises(HttpResponseError):
await client.get_document(key="2")
return doc_count
async def _test_merge_documents_existing(self, client, doc_count):
docs = [{"hotelId": "5", "rating": 1}, {"hotelId": "6", "rating": 2}]
results = await client.merge_documents(docs)
assert len(results) == len(docs)
assert set(x.status_code for x in results) == {200}
# There can be some lag before a document is searchable
if self.is_live:
time.sleep(TIME_TO_SLEEP)
assert await client.get_document_count() == doc_count
result = await client.get_document(key="5")
assert result["rating"] == 1
result = await client.get_document(key="6")
assert result["rating"] == 2
return doc_count
async def _test_merge_documents_missing(self, client, doc_count):
# merge to one existing and one missing document
docs = [{"hotelId": "1003", "rating": 1}, {"hotelId": "1", "rating": 2}]
results = await client.merge_documents(docs)
assert len(results) == len(docs)
assert set(x.status_code for x in results) == {200, 404}
# There can be some lag before a document is searchable
if self.is_live:
time.sleep(TIME_TO_SLEEP)
assert await client.get_document_count() == doc_count
with pytest.raises(HttpResponseError):
await client.get_document(key="1003")
result = await client.get_document(key="1")
assert result["rating"] == 2
return doc_count
async def _test_merge_or_upload_documents(self, client, doc_count):
# merge to one existing and one missing
docs = [{"hotelId": "1003", "rating": 1}, {"hotelId": "1", "rating": 2}]
results = await client.merge_or_upload_documents(docs)
assert len(results) == len(docs)
assert set(x.status_code for x in results) == {200, 201}
doc_count += 1
# There can be some lag before a document is searchable
if self.is_live:
time.sleep(TIME_TO_SLEEP)
assert await client.get_document_count() == doc_count
result = await client.get_document(key="1003")
assert result["rating"] == 1
result = await client.get_document(key="1")
assert result["rating"] == 2
return doc_count
|
5e1a0dbf05923b37b90d66627727da342d894863
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/server/grr_response_server/databases/mysql_blob_keys.py
|
c9b853a754af5911c152ff17a66395c8727bfdde
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,122
|
py
|
mysql_blob_keys.py
|
#!/usr/bin/env python
"""A module with MySQL implementation of blobstore encryption keys methods."""
from __future__ import annotations
from typing import Collection
from typing import Dict
from typing import Optional
import MySQLdb
from grr_response_server.databases import mysql_utils
from grr_response_server.rdfvalues import objects as rdf_objects
class MySQLDBBlobKeysMixin(object):
"""A MySQL database mixin class with blobstore encryption keys methods."""
@mysql_utils.WithTransaction()
def WriteBlobEncryptionKeys(
self,
key_names: Dict[rdf_objects.BlobID, str],
cursor: MySQLdb.cursors.Cursor,
) -> None:
"""Associates the specified blobs with the given encryption keys."""
query = """
INSERT
INTO blob_encryption_keys(blob_id, key_name)
VALUES (%s, %s)
"""
args = []
for blob_id, key_name in key_names.items():
args.append((blob_id.AsBytes(), key_name))
cursor.executemany(query, args)
@mysql_utils.WithTransaction(readonly=True)
def ReadBlobEncryptionKeys(
self,
blob_ids: Collection[rdf_objects.BlobID],
cursor: MySQLdb.cursors.Cursor,
) -> Dict[rdf_objects.BlobID, Optional[str]]:
"""Retrieves encryption keys associated with blobs."""
# A special case for empty list of blob identifiers to avoid syntax errors
# in the query below.
if not blob_ids:
return {}
blob_ids_bytes = [blob_id.AsBytes() for blob_id in blob_ids]
query = """
SELECT k.blob_id, k.key_name
FROM blob_encryption_keys AS k
INNER JOIN (SELECT blob_id, MAX(timestamp) AS max_timestamp
FROM blob_encryption_keys
WHERE blob_id IN ({})
GROUP BY blob_id) AS last_k
ON k.blob_id = last_k.blob_id
AND k.timestamp = last_k.max_timestamp
""".format(",".join(["%s"] * len(blob_ids_bytes)))
results = {blob_id: None for blob_id in blob_ids}
cursor.execute(query, blob_ids_bytes)
for blob_id_bytes, key_name in cursor.fetchall():
blob_id = rdf_objects.BlobID(blob_id_bytes)
results[blob_id] = key_name
return results
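# Editor's sketch of how the two mixin methods pair up; ``db`` stands for a database
# object using this mixin, and the blob ID bytes and key name are hypothetical.
#
#     blob_id = rdf_objects.BlobID(b"\x00" * 32)
#     db.WriteBlobEncryptionKeys({blob_id: "crypto-key-2023"})
#     db.ReadBlobEncryptionKeys([blob_id])   # -> {blob_id: "crypto-key-2023"}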
|
55d934fc5af2084dd75a3b78b11d4431b6e52297
|
94c1805df5a09c39159d502f420d19ad54b567fc
|
/runtime/deps/gyp/test/rules-variables/gyptest-rules-variables.py
|
c1825e0c1363857ebca6e03fcf1477ab74054b43
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tmikov/jscomp
|
9805a5a4d06520549c57380f0df4a1c0aa0dab56
|
83828441cb38ec96603a6a60be06977d4852940a
|
refs/heads/develop
| 2021-01-19T02:56:35.102659
| 2016-04-12T06:19:30
| 2016-04-12T06:19:30
| 36,981,674
| 237
| 13
|
Apache-2.0
| 2018-10-14T09:48:12
| 2015-06-06T13:49:26
|
C
|
UTF-8
|
Python
| false
| false
| 806
|
py
|
gyptest-rules-variables.py
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies rules related variables are expanded.
"""
import sys
if sys.platform == 'win32':
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
import TestGyp
test = TestGyp.TestGyp(formats=['ninja'])
test.relocate('src', 'relocate/src')
test.run_gyp('variables.gyp', chdir='relocate/src')
test.build('variables.gyp', chdir='relocate/src')
test.run_built_executable('all_rule_variables',
chdir='relocate/src',
stdout="input_root\ninput_dirname\ninput_path\n" +
"input_ext\ninput_name\n")
test.pass_test()
|
1caab6f8f1b3c4d5c128b97f49991167d997bcbe
|
d629e0d6ca0a3bb0c270879b2388ad5e23ed7e9d
|
/tests/mypy/module/success.py
|
a48fafd22f86be81ae79b220e28760e6ee5e1f00
|
[
"MIT"
] |
permissive
|
seandstewart/typical
|
942645157dcd70a69fbe1957f88b7b0a850528b7
|
97eec305c094f265abc49051150be56a2fe17a07
|
refs/heads/main
| 2023-08-18T00:35:48.713176
| 2023-04-26T16:56:57
| 2023-04-26T16:56:57
| 175,726,271
| 190
| 15
|
MIT
| 2023-07-25T21:39:11
| 2019-03-15T01:24:36
|
Python
|
UTF-8
|
Python
| false
| false
| 716
|
py
|
success.py
|
import typic
@typic.klass
class Klass:
attr: str
class Other:
def __init__(self, attr: str):
self.attr = attr
if __name__ == "__main__":
Klass(attr="foo")
Klass("foo")
Klass.transmute("foo")
Klass.validate({"attr": "foo"})
Klass("foo").primitive()
Klass("foo").primitive(lazy=True)
Klass("foo").tojson()
Klass("foo").tojson(indent=0)
Klass("foo").tojson(ensure_ascii=False)
typic.primitive(Klass("foo"))
k: Klass = typic.transmute(Klass, "foo")
v = typic.validate(Klass, {"attr": "foo"})
j: str = typic.tojson(Klass("foo"))
o: Other = Klass("foo").translate(Other)
fields = [*Klass("foo").iterate()]
iterfields = [*Klass("foo")]
|
dbe6eedae8e75a58df170c7a8c56d3bef81aacee
|
ec4f9f4df4497507c2f0f0c64afbf68f07b7e62a
|
/sqlathanor/declarative/declarative_base.py
|
efcbef27ff0b428528a598403ef1e2765a93d9f4
|
[
"MIT"
] |
permissive
|
insightindustry/sqlathanor
|
70c4fe11081e54aadbf661770a016d8db3e1cc2a
|
a5cfd349d092b25a3ffb3950b996b13878e1db17
|
refs/heads/master
| 2022-05-22T20:01:10.571987
| 2020-12-28T18:00:42
| 2020-12-28T18:00:42
| 137,494,226
| 105
| 7
|
MIT
| 2022-04-28T19:34:34
| 2018-06-15T14:00:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
declarative_base.py
|
# -*- coding: utf-8 -*-
# The lack of a module docstring for this module is **INTENTIONAL**.
# The module is imported into the documentation using Sphinx's autodoc
# extension, and its member function documentation is automatically incorporated
# there as needed.
from sqlalchemy.ext.declarative import declarative_base as SA_declarative_base
from validator_collection import checkers
from sqlathanor.declarative.base_model import BaseModel
# pylint: disable=no-member
def declarative_base(cls = BaseModel, **kwargs):
"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces appropriate
:class:`Table <sqlalchemy:sqlalchemy.schema.Table>` objects and makes the
appropriate :func:`mapper <sqlalchemy:sqlalchemy.orm.mapper>` calls based on the
information provided declaratively in the class and any subclasses of the class.
:param cls: Defaults to :class:`BaseModel` to provide serialization/de-serialization
support.
If a :class:`tuple <python:tuple>` of classes, will include :class:`BaseModel`
in that list of classes to mixin serialization/de-serialization support.
If not :obj:`None <python:None>` and not a :class:`tuple <python:tuple>`, will mixin
:class:`BaseModel` with the value passed to provide
serialization/de-serialization support.
:type cls: :obj:`None <python:None>` / :class:`tuple <python:tuple>` of
classes / class object
:param kwargs: Additional keyword arguments supported by the original
:func:`sqlalchemy.ext.declarative.declarative_base() <sqlalchemy:sqlalchemy.ext.declarative.declarative_base>`
function
:type kwargs: keyword arguments
:returns: Base class for declarative class definitions with support for
serialization and de-serialization.
"""
if isinstance(cls, tuple):
class_list = [x for x in cls]
class_list.insert(0, BaseModel)
        cls = tuple(class_list)
elif checkers.is_iterable(cls):
class_list = [BaseModel]
class_list.extend(cls)
        cls = tuple(class_list)
return SA_declarative_base(cls = cls, **kwargs)
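# Editor's usage sketch (illustrative only, mirroring the example style used in the
# as_declarative docstring below; Column is assumed to be sqlathanor's
# serialization-aware Column):
#
#     from sqlathanor import Column, declarative_base
#     from sqlalchemy import Integer, Text
#
#     Base = declarative_base()
#
#     class User(Base):
#         __tablename__ = 'users'
#         id = Column(Integer, primary_key = True, supports_csv = True)
#         name = Column(Text, supports_csv = True)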
def as_declarative(**kw):
"""Class decorator for :func:`declarative_base`.
Provides a syntactical shortcut to the ``cls`` argument
sent to :func:`declarative_base`, allowing the base class
to be converted in-place to a "declarative" base:
.. code-block:: python
from sqlathanor import as_declarative
@as_declarative()
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer,
primary_key = True,
supports_csv = True)
class MyMappedClass(Base):
# ...
.. tip::
All keyword arguments passed to :func:`as_declarative` are passed
along to :func:`declarative_base`.
.. seealso::
* :func:`declarative_base() <declarative_base>`
"""
def decorate(cls):
kw['cls'] = cls
kw['name'] = cls.__name__
return declarative_base(**kw)
return decorate
|
d8d9cffcc782b6f2ad68863a85a5ccdee7b3f423
|
adf3ca8f0ce8b3333d70c83118932d0e46d90182
|
/tiledb/tests/cc/test_vfs.py
|
3bfb76758354c8d412faf1b94f2cb86ce92edc05
|
[
"MIT"
] |
permissive
|
TileDB-Inc/TileDB-Py
|
77ccce93e1607ce4280d57d2f715cf490aa9f0d3
|
223ee9939e23ecb618bd98d89dc9e874ebd47a2d
|
refs/heads/dev
| 2023-08-28T14:13:52.112262
| 2023-08-25T20:03:54
| 2023-08-26T01:19:46
| 91,851,641
| 167
| 35
|
MIT
| 2023-09-14T19:07:10
| 2017-05-19T23:06:53
|
Python
|
UTF-8
|
Python
| false
| false
| 849
|
py
|
test_vfs.py
|
import os
import tiledb.cc as lt
def test_dir(tmp_path):
ctx = lt.Context()
vfs = lt.VFS(ctx)
path = os.path.join(tmp_path, "test_dir")
vfs._create_dir(path)
assert vfs._is_dir(path) is True
assert vfs._dir_size(path) == 0
vfs._remove_dir(path)
assert vfs._is_dir(path) is False
def test_file_handle(tmp_path):
ctx = lt.Context()
vfs = lt.VFS(ctx)
path = os.path.join(tmp_path, "test_file_handle")
fh = lt.FileHandle(ctx, vfs, path, lt.VFSMode.WRITE)
fh._write(b"Hello")
fh = lt.FileHandle(ctx, vfs, path, lt.VFSMode.READ)
assert fh._read(0, 5) == b"Hello"
fh = lt.FileHandle(ctx, vfs, path, lt.VFSMode.APPEND)
fh._write(b", world!")
fh = lt.FileHandle(ctx, vfs, path, lt.VFSMode.READ)
assert fh._read(0, 13) == b"Hello, world!"
assert fh._closed is False
|
5c108c7ee51c14f01106f561274fe9f792079f7e
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/grit/grit/node/mapping.py
|
2fef690ac0bb7569613eccca6db5dab3755458c5
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
mapping.py
|
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Maps each node type to an implementation class.
When adding a new node type, you add to this mapping.
'''
from grit import exception
from grit.node import empty
from grit.node import include
from grit.node import message
from grit.node import misc
from grit.node import node_io
from grit.node import structure
from grit.node import variant
_ELEMENT_TO_CLASS = {
'identifiers' : empty.IdentifiersNode,
'includes' : empty.IncludesNode,
'messages' : empty.MessagesNode,
'outputs' : empty.OutputsNode,
'structures' : empty.StructuresNode,
'translations' : empty.TranslationsNode,
'include' : include.IncludeNode,
'emit' : node_io.EmitNode,
'file' : node_io.FileNode,
'output' : node_io.OutputNode,
'ex' : message.ExNode,
'message' : message.MessageNode,
'ph' : message.PhNode,
'else' : misc.ElseNode,
'grit' : misc.GritNode,
'identifier' : misc.IdentifierNode,
'if' : misc.IfNode,
'part' : misc.PartNode,
'release' : misc.ReleaseNode,
'then' : misc.ThenNode,
'structure' : structure.StructureNode,
'skeleton' : variant.SkeletonNode,
}
def ElementToClass(name, typeattr):
'''Maps an element to a class that handles the element.
Args:
name: 'element' (the name of the element)
typeattr: 'type' (the value of the type attribute, if present, else None)
  Returns:
    The node class that handles the element.
'''
if name not in _ELEMENT_TO_CLASS:
raise exception.UnknownElement()
return _ELEMENT_TO_CLASS[name]
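# Editor's example (illustrative): mapping element names to handler classes.
#
#     ElementToClass('message', None)   # -> message.MessageNode
#     ElementToClass('nosuch', None)    # raises exception.UnknownElement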
|
1e93c0418192ec363da8c24816559238d18d2720
|
2c4c3f777d94157d5a5cf8664907de1a605a1110
|
/utils/data_loader.py
|
24c6cc6930e9197c264a811cb3a55322b70f0580
|
[
"Apache-2.0"
] |
permissive
|
safe-graph/DGFraud
|
a86715662d86291c22dae389aa36d72b74042ab6
|
22b72d75f81dd057762f0c7225a4558a25095b8f
|
refs/heads/master
| 2023-08-23T01:01:04.195966
| 2022-04-20T21:39:08
| 2022-04-20T21:39:08
| 223,415,751
| 632
| 162
|
Apache-2.0
| 2020-07-31T04:10:54
| 2019-11-22T14:02:36
|
Python
|
UTF-8
|
Python
| false
| false
| 7,253
|
py
|
data_loader.py
|
import numpy as np
from sklearn.model_selection import train_test_split
import scipy.io as sio
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '..')))
from utils.utils import pad_adjlist
import zipfile
# zip_src = '../dataset/DBLP4057_GAT_with_idx_tra200_val_800.zip'
# dst_dir = '../dataset'
def unzip_file(zip_src, dst_dir):
iz = zipfile.is_zipfile(zip_src)
if iz:
zf = zipfile.ZipFile(zip_src, 'r')
for file in zf.namelist():
zf.extract(file, dst_dir)
else:
print('Zip Error.')
def load_data_dblp(path='../../dataset/DBLP4057_GAT_with_idx_tra200_val_800.mat'):
data = sio.loadmat(path)
truelabels, features = data['label'], data['features'].astype(float)
N = features.shape[0]
rownetworks = [data['net_APA'] - np.eye(N)]
# rownetworks = [data['net_APA'] - np.eye(N), data['net_APCPA'] - np.eye(N), data['net_APTPA'] - np.eye(N)]
y = truelabels
index = range(len(y))
X_train, X_test, y_train, y_test = train_test_split(index, y, stratify=y, test_size=0.4, random_state=48,
shuffle=True)
return rownetworks, features, X_train, y_train, X_test, y_test
def load_example_semi():
# example data for SemiGNN
features = np.array([[1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1]
])
N = features.shape[0]
# Here we use binary matrix as adjacency matrix, weighted matrix is acceptable as well
rownetworks = [np.array([[1, 0, 0, 1, 0, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0],
[1, 0, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 1, 0]]),
np.array([[1, 0, 0, 0, 0, 1, 1, 1],
[0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1],
[1, 1, 0, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 0, 1, 1, 1],
[1, 0, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1]])]
y = np.array([[0, 1], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1]])
index = range(len(y))
X_train, X_test, y_train, y_test = train_test_split(index, y, stratify=y, test_size=0.2, random_state=48,
                                                        shuffle=True)  # test_size=0.25, batch_size=2
return rownetworks, features, X_train, y_train, X_test, y_test
def load_example_gem():
# example data for GEM
# node=8 p=7 D=2
features = np.array([[5, 3, 0, 1, 0, 0, 0, 1, 0],
[2, 3, 1, 2, 0, 0, 0, 1, 0],
[3, 1, 6, 4, 0, 0, 1, 1, 0],
[0, 0, 2, 4, 4, 1, 0, 1, 1],
[0, 0, 3, 3, 1, 0, 1, 0, 1],
[1, 2, 5, 1, 4, 1, 0, 0, 1],
[0, 1, 3, 5, 1, 0, 0, 0, 1],
[0, 3, 4, 5, 2, 1, 1, 0, 1]
])
N = features.shape[0]
rownetworks = [np.array([[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]),
np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1]])]
# y = np.array([-1, -1, -1, -1, 1, 1, 1, 1])
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
y = y[:, np.newaxis]
index = range(len(y))
X_train, X_test, y_train, y_test = train_test_split(index, y, stratify=y, test_size=0.2, random_state=8,
shuffle=True)
return rownetworks, features, X_train, y_train, X_test, y_test
def load_data_gas():
# example data for GAS
# construct U-E-I network
user_review_adj = [[0, 1], [2], [3], [5], [4, 6]]
user_review_adj = pad_adjlist(user_review_adj)
user_item_adj = [[0, 1], [0], [0], [2], [1, 2]]
user_item_adj = pad_adjlist(user_item_adj)
item_review_adj = [[0, 2, 3], [1, 4], [5, 6]]
item_review_adj = pad_adjlist(item_review_adj)
item_user_adj = [[0, 1, 2], [0, 4], [3, 4]]
item_user_adj = pad_adjlist(item_user_adj)
review_item_adj = [0, 1, 0, 0, 1, 2, 2]
review_user_adj = [0, 0, 1, 2, 4, 3, 4]
# initialize review_vecs
review_vecs = np.array([[1, 0, 0, 1, 0],
[1, 0, 0, 1, 1],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[1, 1, 0, 1, 1]])
# initialize user_vecs and item_vecs with user_review_adj and item_review_adj
# for example, u0 has r1 and r0, then we get the first line of user_vecs: [1, 1, 0, 0, 0, 0, 0]
user_vecs = np.array([[1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 1]])
item_vecs = np.array([[1, 0, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1]])
features = [review_vecs, user_vecs, item_vecs]
# initialize the Comment Graph
homo_adj = [[1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0]]
adjs = [user_review_adj, user_item_adj, item_review_adj, item_user_adj, review_user_adj, review_item_adj, homo_adj]
y = np.array([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0], [1, 0], [0, 1], [1, 0]])
index = range(len(y))
X_train, X_test, y_train, y_test = train_test_split(index, y, stratify=y, test_size=0.4, random_state=48,
shuffle=True)
return adjs, features, X_train, y_train, X_test, y_test
|
de34346812fa4c132a3e672127bb533c8d4e549b
|
dfa1a1a263eab3ac8bbcb2a00297da7fc82bccfd
|
/src/graphs/toplogical_sort_bfs.py
|
c91750c1e740118145884b6015d84fd06b8f9166
|
[] |
no_license
|
monpro/algorithm
|
07e79e7a85ca9fe86fac0b3c740de2f2037f5e89
|
a330e92191642e2965939a06b050ca84d4ed11a6
|
refs/heads/master
| 2021-07-01T03:49:59.040611
| 2020-08-25T12:36:54
| 2020-08-25T12:36:54
| 143,118,129
| 102
| 0
| null | 2020-05-31T04:22:46
| 2018-08-01T07:12:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
toplogical_sort_bfs.py
|
"""
Definition for a Directed graph node
class DirectedGraphNode:
def __init__(self, x):
self.label = x
self.neighbors = []
"""
class Solution:
"""
@param: graph: A list of Directed graph node
@return: Any topological order for the given graph.
"""
def topSort(self, graph):
# write your code here
# indegree_hash_map for every node
indegree_hash_map = dict((i, 0) for i in graph)
for i in indegree_hash_map:
for j in i.neighbors:
indegree_hash_map[j] += 1
queue, result = [], []
# get all 0- degree-node
for i in indegree_hash_map:
if indegree_hash_map[i] == 0:
queue.append(i)
while queue:
node = queue.pop(0)
result.append(node)
for i in node.neighbors:
indegree_hash_map[i] -= 1
if indegree_hash_map[i] == 0:
queue.append(i)
return result
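# Editor's sketch: a tiny graph exercising topSort. DirectedGraphNode mirrors the
# commented definition at the top of this file and is illustrative only.
#
#     a, b, c = DirectedGraphNode(1), DirectedGraphNode(2), DirectedGraphNode(3)
#     a.neighbors = [b, c]
#     b.neighbors = [c]
#     Solution().topSort([a, b, c])   # one valid order: [a, b, c]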
|
5dd691f26049b91e38d1a542aec5cec5d23bfea4
|
7343ece3b82ac87a594865c4074623b45b0297b4
|
/synapse/util/linked_list.py
|
8efbf061aaae9461ee322668315edd903201b112
|
[
"Apache-2.0"
] |
permissive
|
matrix-org/synapse
|
a00111f83310783b78e2996557f8bbae4d9fb229
|
d35bed8369514fe727b4fe1afb68f48cc8b2655a
|
refs/heads/develop
| 2023-09-05T05:24:20.808942
| 2023-09-04T16:14:09
| 2023-09-04T16:14:09
| 22,844,864
| 12,215
| 2,869
|
Apache-2.0
| 2023-09-14T15:20:48
| 2014-08-11T15:51:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,006
|
py
|
linked_list.py
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A circular doubly linked list implementation.
"""
import threading
from typing import Generic, Optional, Type, TypeVar
P = TypeVar("P")
LN = TypeVar("LN", bound="ListNode")
class ListNode(Generic[P]):
"""A node in a circular doubly linked list, with an (optional) reference to
a cache entry.
The reference should only be `None` for the root node or if the node has
been removed from the list.
"""
# A lock to protect mutating the list prev/next pointers.
_LOCK = threading.Lock()
# We don't use attrs here as in py3.6 you can't have `attr.s(slots=True)`
# and inherit from `Generic` for some reason
__slots__ = [
"cache_entry",
"prev_node",
"next_node",
]
def __init__(self, cache_entry: Optional[P] = None) -> None:
self.cache_entry = cache_entry
self.prev_node: Optional[ListNode[P]] = None
self.next_node: Optional[ListNode[P]] = None
@classmethod
def create_root_node(cls: Type["ListNode[P]"]) -> "ListNode[P]":
"""Create a new linked list by creating a "root" node, which is a node
that has prev_node/next_node pointing to itself and no associated cache
entry.
"""
root = cls()
root.prev_node = root
root.next_node = root
return root
@classmethod
def insert_after(
cls: Type[LN],
cache_entry: P,
node: "ListNode[P]",
) -> LN:
"""Create a new list node that is placed after the given node.
Args:
cache_entry: The associated cache entry.
node: The existing node in the list to insert the new entry after.
"""
new_node = cls(cache_entry)
with cls._LOCK:
new_node._refs_insert_after(node)
return new_node
def remove_from_list(self) -> None:
"""Remove this node from the list."""
with self._LOCK:
self._refs_remove_node_from_list()
# We drop the reference to the cache entry to break the reference cycle
# between the list node and cache entry, allowing the two to be dropped
# immediately rather than at the next GC.
self.cache_entry = None
def move_after(self, node: "ListNode[P]") -> None:
"""Move this node from its current location in the list to after the
given node.
"""
with self._LOCK:
# We assert that both this node and the target node is still "alive".
assert self.prev_node
assert self.next_node
assert node.prev_node
assert node.next_node
assert self is not node
# Remove self from the list
self._refs_remove_node_from_list()
# Insert self back into the list, after target node
self._refs_insert_after(node)
def _refs_remove_node_from_list(self) -> None:
"""Internal method to *just* remove the node from the list, without
e.g. clearing out the cache entry.
"""
if self.prev_node is None or self.next_node is None:
# We've already been removed from the list.
return
prev_node = self.prev_node
next_node = self.next_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
# We set these to None so that we don't get circular references,
# allowing us to be dropped without having to go via the GC.
self.prev_node = None
self.next_node = None
def _refs_insert_after(self, node: "ListNode[P]") -> None:
"""Internal method to insert the node after the given node."""
# This method should only be called when we're not already in the list.
assert self.prev_node is None
assert self.next_node is None
# We expect the given node to be in the list and thus have valid
# prev/next refs.
assert node.next_node
assert node.prev_node
prev_node = node
next_node = node.next_node
self.prev_node = prev_node
self.next_node = next_node
prev_node.next_node = self
next_node.prev_node = self
def get_cache_entry(self) -> Optional[P]:
"""Get the cache entry, returns None if this is the root node (i.e.
cache_entry is None) or if the entry has been dropped.
"""
return self.cache_entry
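# --- hedged usage sketch, not part of the Synapse module above ---
# A minimal in-memory walk through the list API; the string "entries" stand in
# for real cache entries and are illustrative only.
if __name__ == "__main__":
    root: ListNode[str] = ListNode.create_root_node()
    first = ListNode.insert_after("first entry", root)
    second = ListNode.insert_after("second entry", root)  # list: root -> second -> first
    first.move_after(root)                                # list: root -> first -> second
    assert root.next_node is first
    first.remove_from_list()                              # also drops the entry reference
    assert first.get_cache_entry() is None
    assert root.next_node is second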
|
ae2b682ee8a1b9d4db148dbb4b94f1902b74c8ae
|
7418bfa84537014885da5414c22c6b926101b136
|
/kfac/python/kernel_tests/optimizer_test.py
|
3cd06024016525271a51efc937febaa43b0f2eb9
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/kfac
|
a024789cfa99f08f9488d8be930ca233a874e860
|
ddad6375bbdebfae809bccfd3a5c3db073128764
|
refs/heads/master
| 2023-09-04T08:45:24.299663
| 2022-02-11T18:05:09
| 2022-02-11T20:33:56
| 119,905,182
| 193
| 44
|
Apache-2.0
| 2021-08-30T19:54:19
| 2018-02-01T23:14:00
|
Python
|
UTF-8
|
Python
| false
| false
| 7,517
|
py
|
optimizer_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kfac.optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
from kfac.python.ops import fisher_factors as ff
from kfac.python.ops import layer_collection as lc
from kfac.python.ops import optimizer
def dummy_layer_collection():
lcoll = lc.LayerCollection()
dummy = tf.constant([1., 2.])
lcoll.register_categorical_predictive_distribution(logits=dummy)
return lcoll
class OptimizerTest(tf.test.TestCase):
def testOptimizerInitInvalidMomentumRegistration(self):
with self.assertRaises(ValueError):
optimizer.KfacOptimizer(
0.1, 0.2, lc.LayerCollection(), 0.3, momentum_type='foo')
def testOptimizerInit(self):
with tf.Graph().as_default():
layer_collection = lc.LayerCollection()
inputs = tf.ones((2, 1)) * 2
weights_val = np.ones((1, 1), dtype=np.float32) * 3.
weights = tf.get_variable('w', initializer=tf.constant(weights_val))
bias = tf.get_variable(
'b', initializer=tf.zeros_initializer(), shape=(1, 1))
output = tf.matmul(inputs, weights) + bias
layer_collection.register_fully_connected((weights, bias), inputs, output)
logits = tf.tanh(output)
targets = tf.constant([[0.], [1.]])
output = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=targets))
layer_collection.register_categorical_predictive_distribution(logits)
optimizer.KfacOptimizer(
0.1,
0.2,
layer_collection,
0.3,
momentum=0.5,
momentum_type='regular')
def testSquaredFisherNorm(self):
with tf.Graph().as_default(), self.test_session() as sess:
grads_and_vars = [(tf.constant([[1., 2.], [3., 4.]]), None),
(tf.constant([[2., 3.], [4., 5.]]), None)]
pgrads_and_vars = [(tf.constant([[3., 4.], [5., 6.]]), None),
(tf.constant([[7., 8.], [9., 10.]]), None)]
opt = optimizer.KfacOptimizer(0.1, 0.2, dummy_layer_collection(), 0.3)
sq_norm = opt._squared_fisher_norm(grads_and_vars, pgrads_and_vars)
self.assertAlmostEqual(174., sess.run(sq_norm), places=5)
def testUpdateClipCoeff(self):
with tf.Graph().as_default(), self.test_session() as sess:
grads_and_vars = [(tf.constant([[1., 2.], [3., 4.]]), None),
(tf.constant([[2., 3.], [4., 5.]]), None)]
pgrads_and_vars = [(tf.constant([[3., 4.], [5., 6.]]), None),
(tf.constant([[7., 8.], [9., 10.]]), None)]
lrate = 0.1
# Note: without rescaling, the squared Fisher norm of the update
# is 1.74
# If the update already satisfies the norm constraint, there should
# be no rescaling.
opt = optimizer.KfacOptimizer(
lrate, 0.2, dummy_layer_collection(), 0.3, norm_constraint=10.,
name='KFAC_1')
coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
self.assertAlmostEqual(1., sess.run(coeff), places=5)
# If the update violates the constraint, it should be rescaled to
# be on the constraint boundary.
opt = optimizer.KfacOptimizer(
lrate, 0.2, dummy_layer_collection(), 0.3, norm_constraint=0.5,
name='KFAC_2')
coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
sq_norm_pgrad = opt._squared_fisher_norm(grads_and_vars, pgrads_and_vars)
sq_norm_update = lrate**2 * coeff**2 * sq_norm_pgrad
self.assertAlmostEqual(0.5, sess.run(sq_norm_update), places=5)
def testUpdateVelocities(self):
with tf.Graph().as_default(), self.test_session() as sess:
layers = lc.LayerCollection()
layers.register_categorical_predictive_distribution(tf.constant([1.0]))
opt = optimizer.KfacOptimizer(
0.1, 0.2, layers, 0.3, momentum=0.5, momentum_type='regular')
x = tf.get_variable('x', initializer=tf.ones((2, 2)))
y = tf.get_variable('y', initializer=tf.ones((2, 2)) * 2)
vec1 = tf.ones((2, 2)) * 3
vec2 = tf.ones((2, 2)) * 4
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
update_op = opt._update_velocities([(vec1, x), (vec2, y)], 0.5)
opt_vars = [
v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
if v not in model_vars
]
sess.run(tf.global_variables_initializer())
old_opt_vars = sess.run(opt_vars)
# Optimizer vars start out at 0.
for opt_var in old_opt_vars:
self.assertAllEqual(sess.run(tf.zeros_like(opt_var)), opt_var)
sess.run(update_op)
new_opt_vars = sess.run(opt_vars)
# After one update, the velocities are equal to the vectors.
for vec, opt_var in zip([vec1, vec2], new_opt_vars):
self.assertAllEqual(sess.run(vec), opt_var)
sess.run(update_op)
final_opt_vars = sess.run(opt_vars)
for first, second in zip(new_opt_vars, final_opt_vars):
self.assertFalse(np.equal(first, second).all())
def testApplyGradients(self):
with tf.Graph().as_default(), self.test_session() as sess:
layer_collection = lc.LayerCollection()
inputs = tf.ones((2, 1)) * 2
weights_val = np.ones((1, 1), dtype=np.float32) * 3.
weights = tf.get_variable('w', initializer=tf.constant(weights_val))
bias = tf.get_variable(
'b', initializer=tf.zeros_initializer(), shape=(1, 1))
output = tf.matmul(inputs, weights) + bias
layer_collection.register_fully_connected((weights, bias), inputs, output)
preds = output
targets = tf.constant([[0.34], [1.56]])
output = tf.reduce_mean(tf.square(targets - preds))
layer_collection.register_squared_error_loss(preds)
opt = optimizer.KfacOptimizer(
0.1,
0.2,
layer_collection,
cov_ema_decay=0.3,
momentum=0.5,
momentum_type='regular')
(cov_update_thunks,
inv_update_thunks) = opt.make_vars_and_create_op_thunks()
cov_update_ops = tuple(thunk() for thunk in cov_update_thunks)
inv_update_ops = tuple(thunk() for thunk in inv_update_thunks)
grads_and_vars = opt.compute_gradients(output, [weights, bias])
all_vars = [grad_and_var[1] for grad_and_var in grads_and_vars]
op = opt.apply_gradients(grads_and_vars)
sess.run(tf.global_variables_initializer())
old_vars = sess.run(all_vars)
sess.run(cov_update_ops)
sess.run(inv_update_ops)
sess.run(op)
new_vars = sess.run(all_vars)
for old_var, new_var in zip(old_vars, new_vars):
self.assertNotEqual(old_var, new_var)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
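# --- hedged worked arithmetic for testUpdateClipCoeff above (not in the original file) ---
# The squared Fisher norm of (grads, pgrads) is 174 (checked in testSquaredFisherNorm).
# With lrate = 0.1 the raw update has squared norm lrate**2 * 174 = 1.74, so it already
# satisfies norm_constraint=10 (coeff == 1), while under norm_constraint=0.5 the clip
# coefficient becomes sqrt(0.5 / 1.74) ~= 0.536, making lrate**2 * coeff**2 * 174 == 0.5,
# which is exactly what the final assertAlmostEqual verifies.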
|
bf45c66d8791b6ab6d9d05914e4dd10a9326a7bf
|
36437b397a855f3986325f1bfe41d7ced00b703a
|
/tests/hierarchies/test_name_harmonizing.py
|
e1ced91a83f819a0be5749d8f406ac8d1fdf2f25
|
[
"MIT"
] |
permissive
|
nolar/kopf
|
090cd21550e3a86e512a4c9150dfcf5f59ac14e4
|
538df59b88d1aab7b985d703483497f73c6c4783
|
refs/heads/main
| 2023-08-29T20:39:07.128912
| 2023-08-24T15:47:40
| 2023-08-24T15:47:40
| 288,234,242
| 1,627
| 154
|
MIT
| 2023-09-14T12:31:33
| 2020-08-17T16:45:35
|
Python
|
UTF-8
|
Python
| false
| false
| 11,783
|
py
|
test_name_harmonizing.py
|
import copy
import pytest
import kopf
forced_mode = pytest.mark.parametrize('forcedness', [
pytest.param(dict(forced=True), id='forcedTrue'),
])
non_forced_mode = pytest.mark.parametrize('forcedness', [
pytest.param(dict(forced=False), id='forcedFalse'),
pytest.param(dict(), id='forcedAbsent'),
])
any_forced_mode = pytest.mark.parametrize('forcedness', [
pytest.param(dict(forced=True), id='forcedTrue'),
pytest.param(dict(forced=False), id='forcedFalse'),
pytest.param(dict(), id='forcedAbsent'),
])
strict_mode = pytest.mark.parametrize('strictness', [
pytest.param(dict(strict=True), id='strictTrue'),
])
non_strict_mode = pytest.mark.parametrize('strictness', [
pytest.param(dict(strict=False), id='strictFalse'),
pytest.param(dict(), id='strictAbsent'),
])
any_strict_mode = pytest.mark.parametrize('strictness', [
pytest.param(dict(strict=True), id='strictTrue'),
pytest.param(dict(strict=False), id='strictFalse'),
pytest.param(dict(), id='strictAbsent'),
])
obj1_with_names = pytest.mark.parametrize('obj1', [
pytest.param({'metadata': {'name': 'a'}}, id='regularname'),
pytest.param({'metadata': {'generateName': 'b'}}, id='generatename'),
pytest.param({'metadata': {'name': 'c', 'generateName': 'd'}}, id='bothnames'),
])
obj2_with_names = pytest.mark.parametrize('obj2', [
pytest.param({'metadata': {'name': 'a'}}, id='regularname'),
pytest.param({'metadata': {'generateName': 'b'}}, id='generatename'),
pytest.param({'metadata': {'name': 'c', 'generateName': 'd'}}, id='bothnames'),
])
obj1_without_names = pytest.mark.parametrize('obj1', [
pytest.param({}, id='withoutmeta'),
pytest.param({'metadata': {}}, id='withmeta'),
])
obj2_without_names = pytest.mark.parametrize('obj2', [
pytest.param({}, id='withoutmeta'),
pytest.param({'metadata': {}}, id='withmeta'),
])
# In the NON-FORCED mode, the EXISTING names are preserved.
# The strictness is not involved due to this (no new names added).
@obj1_with_names
@any_strict_mode
@non_forced_mode
def test_preserved_name_of_dict(forcedness, strictness, obj1):
obj1 = copy.deepcopy(obj1)
kopf.harmonize_naming(obj1, name='provided-name', **forcedness, **strictness)
assert obj1['metadata'].get('name') != 'provided-name'
assert obj1['metadata'].get('generateName') != 'provided-name'
@obj2_with_names
@obj1_with_names
@any_strict_mode
@non_forced_mode
def test_preserved_names_of_dicts(forcedness, strictness, multicls, obj1, obj2):
obj1, obj2 = copy.deepcopy(obj1), copy.deepcopy(obj2)
objs = multicls([obj1, obj2])
kopf.harmonize_naming(objs, name='provided-name', **forcedness, **strictness)
assert obj1['metadata'].get('name') != 'provided-name'
assert obj2['metadata'].get('name') != 'provided-name'
assert obj1['metadata'].get('generateName') != 'provided-name'
assert obj2['metadata'].get('generateName') != 'provided-name'
@obj1_with_names
@any_strict_mode
@non_forced_mode
def test_preserved_names_of_pykube_object(forcedness, strictness, pykube_object, obj1):
pykube_object.obj = copy.deepcopy(obj1)
kopf.harmonize_naming(pykube_object, name='provided-name', **forcedness, **strictness)
assert pykube_object.obj['metadata'].get('name') != 'provided-name'
assert pykube_object.obj['metadata'].get('generateName') != 'provided-name'
@obj1_with_names
@any_strict_mode
@non_forced_mode
def test_preserved_names_of_kubernetes_model(forcedness, strictness, kubernetes_model, obj1):
kubernetes_model.metadata.name = obj1.get('metadata', {}).get('name')
kubernetes_model.metadata.generate_name = obj1.get('metadata', {}).get('generateName')
kopf.harmonize_naming(kubernetes_model, name='provided-name', **forcedness, **strictness)
assert kubernetes_model.metadata.name != 'provided-name'
assert kubernetes_model.metadata.generate_name != 'provided-name'
# In the FORCED mode, the EXISTING names are overwritten.
# It only depends which of the names -- regular or generated -- is left.
@obj1_with_names
@strict_mode
@forced_mode
def test_overwriting_of_strict_name_of_dict(forcedness, strictness, obj1):
obj1 = copy.deepcopy(obj1)
kopf.harmonize_naming(obj1, name='provided-name', **forcedness, **strictness)
assert 'name' in obj1['metadata']
assert 'generateName' not in obj1['metadata']
assert obj1['metadata']['name'] == 'provided-name'
@obj2_with_names
@obj1_with_names
@strict_mode
@forced_mode
def test_overwriting_of_strict_names_of_dicts(forcedness, strictness, multicls, obj1, obj2):
obj1, obj2 = copy.deepcopy(obj1), copy.deepcopy(obj2)
objs = multicls([obj1, obj2])
kopf.harmonize_naming(objs, name='provided-name', **forcedness, **strictness)
assert 'name' in obj1['metadata']
assert 'name' in obj2['metadata']
assert 'generateName' not in obj1['metadata']
assert 'generateName' not in obj2['metadata']
assert obj2['metadata']['name'] == 'provided-name'
assert obj1['metadata']['name'] == 'provided-name'
@obj1_with_names
@strict_mode
@forced_mode
def test_overwriting_of_strict_name_of_pykube_object(forcedness, strictness, pykube_object, obj1):
pykube_object.obj = copy.deepcopy(obj1)
kopf.harmonize_naming(pykube_object, name='provided-name', **forcedness, **strictness)
assert pykube_object.obj['metadata'].get('name') == 'provided-name'
assert pykube_object.obj['metadata'].get('generateName') is None
@obj1_with_names
@strict_mode
@forced_mode
def test_overwriting_of_strict_name_of_kubernetes_model(forcedness, strictness, kubernetes_model, obj1):
kubernetes_model.metadata.name = obj1.get('metadata', {}).get('name')
kubernetes_model.metadata.generate_name = obj1.get('metadata', {}).get('generateName')
kopf.harmonize_naming(kubernetes_model, name='provided-name', **forcedness, **strictness)
assert kubernetes_model.metadata.name == 'provided-name'
assert kubernetes_model.metadata.generate_name is None
@obj1_with_names
@non_strict_mode
@forced_mode
def test_overwriting_of_relaxed_name_of_dict(forcedness, strictness, obj1):
obj1 = copy.deepcopy(obj1)
kopf.harmonize_naming(obj1, name='provided-name', **forcedness, **strictness)
assert 'name' not in obj1['metadata']
assert 'generateName' in obj1['metadata']
assert obj1['metadata']['generateName'] == 'provided-name-'
@obj2_with_names
@obj1_with_names
@non_strict_mode
@forced_mode
def test_overwriting_of_relaxed_names_of_dicts(forcedness, strictness, multicls, obj1, obj2):
obj1, obj2 = copy.deepcopy(obj1), copy.deepcopy(obj2)
objs = multicls([obj1, obj2])
kopf.harmonize_naming(objs, name='provided-name', **forcedness, **strictness)
assert 'name' not in obj1['metadata']
assert 'name' not in obj2['metadata']
assert 'generateName' in obj1['metadata']
assert 'generateName' in obj2['metadata']
assert obj1['metadata']['generateName'] == 'provided-name-'
assert obj2['metadata']['generateName'] == 'provided-name-'
@obj1_with_names
@non_strict_mode
@forced_mode
def test_overwriting_of_relaxed_name_of_pykube_object(forcedness, strictness, pykube_object, obj1):
pykube_object.obj = copy.deepcopy(obj1)
kopf.harmonize_naming(pykube_object, name='provided-name', **forcedness, **strictness)
assert pykube_object.obj['metadata'].get('name') is None
assert pykube_object.obj['metadata'].get('generateName') == 'provided-name-'
@obj1_with_names
@non_strict_mode
@forced_mode
def test_overwriting_of_relaxed_name_of_kubernetes_model(forcedness, strictness, kubernetes_model, obj1):
kubernetes_model.metadata.name = obj1.get('metadata', {}).get('name')
kubernetes_model.metadata.generate_name = obj1.get('metadata', {}).get('generateName')
kopf.harmonize_naming(kubernetes_model, name='provided-name', **forcedness, **strictness)
assert kubernetes_model.metadata.name is None
assert kubernetes_model.metadata.generate_name == 'provided-name-'
# When names are ABSENT, they are added regardless of the forced mode.
# The only varying part is which name is added: regular or generated.
@obj1_without_names
@strict_mode
@any_forced_mode
def test_assignment_of_strict_name_of_dict(forcedness, strictness, obj1):
obj1 = copy.deepcopy(obj1)
kopf.harmonize_naming(obj1, name='provided-name', **forcedness, **strictness)
assert 'name' in obj1['metadata']
assert 'generateName' not in obj1['metadata']
assert obj1['metadata']['name'] == 'provided-name'
@obj2_without_names
@obj1_without_names
@strict_mode
@any_forced_mode
def test_assignment_of_strict_names_of_dicts(forcedness, strictness, multicls, obj1, obj2):
obj1, obj2 = copy.deepcopy(obj1), copy.deepcopy(obj2)
objs = multicls([obj1, obj2])
kopf.harmonize_naming(objs, name='provided-name', **forcedness, **strictness)
assert 'name' in obj1['metadata']
assert 'name' in obj2['metadata']
assert 'generateName' not in obj1['metadata']
assert 'generateName' not in obj2['metadata']
assert obj1['metadata']['name'] == 'provided-name'
assert obj2['metadata']['name'] == 'provided-name'
@obj1_without_names
@strict_mode
@any_forced_mode
def test_assignment_of_strict_name_of_pykube_object(forcedness, strictness, pykube_object, obj1):
pykube_object.obj = copy.deepcopy(obj1)
kopf.harmonize_naming(pykube_object, name='provided-name', **forcedness, **strictness)
assert pykube_object.obj['metadata'].get('name') == 'provided-name'
assert pykube_object.obj['metadata'].get('generateName') is None
@strict_mode
@any_forced_mode
def test_assignment_of_strict_name_of_kubernetes_model(forcedness, strictness, kubernetes_model):
kubernetes_model.metadata = None
kopf.harmonize_naming(kubernetes_model, name='provided-name', **forcedness, **strictness)
assert kubernetes_model.metadata.name == 'provided-name'
assert kubernetes_model.metadata.generate_name is None
@obj1_without_names
@non_strict_mode
@any_forced_mode
def test_assignment_of_nonstrict_name_of_dict(forcedness, strictness, obj1):
obj1 = copy.deepcopy(obj1)
kopf.harmonize_naming(obj1, name='provided-name', **forcedness, **strictness)
assert 'name' not in obj1['metadata']
assert 'generateName' in obj1['metadata']
assert obj1['metadata']['generateName'] == 'provided-name-'
@obj2_without_names
@obj1_without_names
@non_strict_mode
@any_forced_mode
def test_assignment_of_nonstrict_names_of_dicts(forcedness, strictness, multicls, obj1, obj2):
obj1, obj2 = copy.deepcopy(obj1), copy.deepcopy(obj2)
objs = multicls([obj1, obj2])
kopf.harmonize_naming(objs, name='provided-name', **forcedness, **strictness)
assert 'name' not in obj1['metadata']
assert 'name' not in obj2['metadata']
assert 'generateName' in obj1['metadata']
assert 'generateName' in obj2['metadata']
assert obj1['metadata']['generateName'] == 'provided-name-'
assert obj2['metadata']['generateName'] == 'provided-name-'
@obj1_without_names
@non_strict_mode
@any_forced_mode
def test_assignment_of_nonstrict_name_of_pykube_object(forcedness, strictness, pykube_object, obj1):
pykube_object.obj = copy.deepcopy(obj1)
kopf.harmonize_naming(pykube_object, name='provided-name', **forcedness, **strictness)
assert pykube_object.obj['metadata'].get('name') is None
assert pykube_object.obj['metadata'].get('generateName') == 'provided-name-'
@non_strict_mode
@any_forced_mode
def test_assignment_of_nonstrict_name_of_kubernetes_model(forcedness, strictness, kubernetes_model):
kubernetes_model.metadata = None
kopf.harmonize_naming(kubernetes_model, name='provided-name', **forcedness, **strictness)
assert kubernetes_model.metadata.name is None
assert kubernetes_model.metadata.generate_name == 'provided-name-'
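# --- hedged usage sketch, not part of the kopf test-suite above ---
# Condenses the behaviour exercised by these tests into three calls on plain
# dicts; the dict contents are illustrative only.
if __name__ == '__main__':
    fresh = {'metadata': {}}
    kopf.harmonize_naming(fresh, name='provided-name')  # non-strict default adds generateName
    assert fresh['metadata']['generateName'] == 'provided-name-'

    named = {'metadata': {'name': 'keep-me'}}
    kopf.harmonize_naming(named, name='provided-name')  # existing names are preserved by default
    assert named['metadata']['name'] == 'keep-me'

    kopf.harmonize_naming(named, name='provided-name', forced=True, strict=True)
    assert named['metadata']['name'] == 'provided-name'  # forced + strict overwrites with the exact name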
|
275b1368601cf5e574429a70d22560584d449283
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/examples/py/kraken-conditional-close-order.py
|
c865a3a520251ad52ade030d4f432325cab711cd
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
kraken-conditional-close-order.py
|
# -*- coding: utf-8 -*-
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
from pprint import pprint
exchange = ccxt.kraken({
# 'apiKey': 'YOUR_API_KEY',
# 'secret': 'YOUR_SECRET',
})
markets = exchange.load_markets()
exchange.verbose = True
symbol = 'XMR/USD'
ticker = exchange.fetch_ticker(symbol)
last_price = ticker['last']
# extra params and overrides: Kraken's 'close' block defines a conditional close order
# that is placed once the primary order fills
params = {
'close': {
'ordertype': 'limit',
'price': last_price * 1.3,
}
}
amount = 0.05
price = last_price * 0.7
order = exchange.create_order(symbol, 'limit', 'buy', amount, price, params)
print('Created order:')
pprint(order)
fetched_order = exchange.fetch_order(order['id'])
print('Fetched order:')
pprint(fetched_order)
canceled_order = exchange.cancel_order(order['id'])
print('Canceled order:')
pprint(canceled_order)
|
31bcff02c841198c59c941a809893346f36e9e5c
|
ef1def58b933921ccf31bece9fc6eb5f7ffb9a18
|
/tensorhive/models/RestrictionSchedule.py
|
8ab1ba0b6e266b25d64e9dc40189a615b6b10f22
|
[
"Apache-2.0"
] |
permissive
|
roscisz/TensorHive
|
4b33acd727e0b294a4a12af972c471e1254136aa
|
5b50245d285618044a9a71c06ea5361a48ad4acb
|
refs/heads/master
| 2023-03-10T05:09:08.874394
| 2022-02-02T11:08:21
| 2022-02-02T11:08:21
| 98,513,283
| 153
| 26
|
Apache-2.0
| 2023-03-01T02:26:54
| 2017-07-27T08:37:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,695
|
py
|
RestrictionSchedule.py
|
import re
import datetime
import logging
from sqlalchemy import Column, Integer, String, Time, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.hybrid import hybrid_property
from tensorhive.database import Base
from tensorhive.models.CRUDModel import CRUDModel
from tensorhive.utils.Weekday import Weekday
from typing import List, Union
log = logging.getLogger(__name__)
class RestrictionSchedule(CRUDModel, Base): # type: ignore
"""
Class representing restriction schedules.
One schedule specifies the same start and end hours for a chosen set of weekdays.
To use different hours on different days, create a separate schedule for each set of
hours.
For example, to create a schedule active on Mondays and Wednesdays between 8 and 11 am and on Tuesdays
between 2 and 4 am, two schedules are needed:
- Schedule A - schedule_days='13', hour_start=datetime.time(8, 0, 0), hour_end=datetime.time(11, 0, 0)
- Schedule B - schedule_days='2', hour_start=datetime.time(2, 0, 0), hour_end=datetime.time(4, 0, 0)
Note: All times and dates used are UTC.
"""
__tablename__ = 'restriction_schedules'
__table_args__ = {'sqlite_autoincrement': True}
__public__ = ['id']
id = Column(Integer, primary_key=True, autoincrement=True)
_schedule_days = Column('schedule_days', String(7), nullable=False)
hour_start = Column(Time(), nullable=False)
hour_end = Column(Time(), nullable=False)
_restrictions = relationship('Restriction', secondary='restriction2schedule', back_populates='_schedules',
viewonly=True)
def __init__(self, schedule_days: Union[List[Weekday], str], hour_start: datetime.time, hour_end: datetime.time):
self.schedule_days = schedule_days
self.hour_start = hour_start
self.hour_end = hour_end
def __repr__(self):
return '''<RestrictionSchedule id={id}
schedule_days={schedule_days}
hour_start={hour_start}
hour_end={hour_end}'''\
.format(id=self.id, schedule_days=self.schedule_days, hour_start=self.hour_start, hour_end=self.hour_end)
def check_assertions(self):
assert self.is_valid_schedule_expression(self.schedule_days), '''
schedule_days does not contain a valid schedule expression - it should consist of numbers from 1 to 7 inclusive,
each representing day of the week that the schedule is valid on (1 - Monday, 2 - Tuesday, ..., 7 - Sunday).
'''
@hybrid_property
def schedule_days(self):
return self._schedule_days
@hybrid_property
def restrictions(self):
return self._restrictions
@schedule_days.setter # type: ignore
def schedule_days(self, days: Union[List[Weekday], str]):
if isinstance(days, str):
self._schedule_days = (''.join(sorted(days)))
else:
self._schedule_days = self.stringify_schedule_list(days)
@property
def is_active(self):
today = str(datetime.datetime.utcnow().date().weekday() + 1) # weekday uses 0-6 for day numbering, we use 1-7
now = datetime.datetime.utcnow().time()
return today in self.schedule_days and self.hour_start <= now < self.hour_end
@staticmethod
def is_valid_schedule_expression(schedule_expression):
has_repeating_characters = len(set(i for i in schedule_expression if schedule_expression.count(i) > 1)) > 0
regex_match = re.fullmatch('[1-7]{1,7}', schedule_expression) is not None
return regex_match and not has_repeating_characters
def as_dict(self, include_private=False):
ret = super(RestrictionSchedule, self).as_dict(include_private=include_private)
ret['scheduleDays'] = [day.to_str() for day in self.parse_schedule_string(self.schedule_days)]
ret['hourStart'] = self.hour_start.strftime('%H:%M')
ret['hourEnd'] = self.hour_end.strftime('%H:%M')
return ret
@staticmethod
def parse_schedule_string(schedule: str) -> List[Weekday]:
return [Weekday(int(day)) for day in sorted(schedule)]
@staticmethod
def stringify_schedule_list(schedule: List[Weekday]) -> str:
return ''.join((sorted([str(day.value) for day in schedule])))
class Restriction2Schedule(Base): # type: ignore
__tablename__ = 'restriction2schedule'
__tableargs__ = {'sqlite_autoincrement': True}
restriction_id = Column(Integer, ForeignKey('restrictions.id', ondelete='CASCADE'), primary_key=True)
schedule_id = Column(Integer, ForeignKey('restriction_schedules.id', ondelete='CASCADE'), primary_key=True)
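# --- hedged usage sketch, not part of the TensorHive module above ---
# Recreates the Schedule A / Schedule B example from the class docstring as
# in-memory objects; it assumes the declarative model can be instantiated
# without an active database session, and nothing is persisted here.
if __name__ == '__main__':
    schedule_a = RestrictionSchedule(schedule_days='13',
                                     hour_start=datetime.time(8, 0, 0),
                                     hour_end=datetime.time(11, 0, 0))
    schedule_b = RestrictionSchedule(schedule_days=[Weekday(2)],
                                     hour_start=datetime.time(2, 0, 0),
                                     hour_end=datetime.time(4, 0, 0))
    schedule_a.check_assertions()
    schedule_b.check_assertions()
    print(schedule_a.schedule_days, schedule_b.schedule_days)  # expected: 13 2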
|
a3558b65bfac161d1de03a1d46d7d8a4f330d5ff
|
ff443629c167f318d071f62886581167c51690c4
|
/contrib/verify-binaries/test.py
|
22d718ece334aa99d33ca1c9f813847fd394c33e
|
[
"MIT"
] |
permissive
|
bitcoin/bitcoin
|
a618b2555d9fe5a2b613e5fec0f4b1eca3b4d86f
|
6f03c45f6bb5a6edaa3051968b6a1ca4f84d2ccb
|
refs/heads/master
| 2023-09-05T00:16:48.295861
| 2023-09-02T17:43:00
| 2023-09-02T17:46:33
| 1,181,927
| 77,104
| 33,708
|
MIT
| 2023-09-14T20:47:31
| 2010-12-19T15:16:43
|
C++
|
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
test.py
|
#!/usr/bin/env python3
import json
import sys
import subprocess
from pathlib import Path
def main():
"""Tests ordered roughly from faster to slower."""
expect_code(run_verify("", "pub", '0.32'), 4, "Nonexistent version should fail")
expect_code(run_verify("", "pub", '0.32.awefa.12f9h'), 11, "Malformed version should fail")
expect_code(run_verify('--min-good-sigs 20', "pub", "22.0"), 9, "--min-good-sigs 20 should fail")
print("- testing verification (22.0)", flush=True)
_220 = run_verify("--json", "pub", "22.0")
try:
result = json.loads(_220.stdout.decode())
except Exception:
print("failed on 22.0 --json:")
print_process_failure(_220)
raise
expect_code(_220, 0, "22.0 should succeed")
v = result['verified_binaries']
assert result['good_trusted_sigs']
assert v['bitcoin-22.0-aarch64-linux-gnu.tar.gz'] == 'ac718fed08570a81b3587587872ad85a25173afa5f9fbbd0c03ba4d1714cfa3e'
assert v['bitcoin-22.0-osx64.tar.gz'] == '2744d199c3343b2d94faffdfb2c94d75a630ba27301a70e47b0ad30a7e0155e9'
assert v['bitcoin-22.0-x86_64-linux-gnu.tar.gz'] == '59ebd25dd82a51638b7a6bb914586201e67db67b919b2a1ff08925a7936d1b16'
def run_verify(global_args: str, command: str, command_args: str) -> subprocess.CompletedProcess:
maybe_here = Path.cwd() / 'verify.py'
path = maybe_here if maybe_here.exists() else Path.cwd() / 'contrib' / 'verify-binaries' / 'verify.py'
if command == "pub":
command += " --cleanup"
return subprocess.run(
f"{path} {global_args} {command} {command_args}",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
def expect_code(completed: subprocess.CompletedProcess, expected_code: int, msg: str):
if completed.returncode != expected_code:
print(f"{msg!r} failed: got code {completed.returncode}, expected {expected_code}")
print_process_failure(completed)
sys.exit(1)
else:
print(f"✓ {msg!r} passed")
def print_process_failure(completed: subprocess.CompletedProcess):
print(f"stdout:\n{completed.stdout.decode()}")
print(f"stderr:\n{completed.stderr.decode()}")
if __name__ == '__main__':
main()
|
65806eb4b6ea7b1dc69d2421158c455735b6275c
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/tests/package/packagers_testers/default_packager_tester.py
|
617ff2698028186ac5cac690364ee6e55d559f2d
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
default_packager_tester.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
from typing import Tuple
import cloudpickle
from mlrun.package import DefaultPackager
from tests.package.packager_tester import (
COMMON_OBJECT_INSTRUCTIONS,
NewClass,
PackagerTester,
PackTest,
PackToUnpackTest,
UnpackTest,
)
def pack_some_class() -> NewClass:
return NewClass(a=1, b=2, c=3)
def unpack_some_class(obj: NewClass):
assert type(obj).__name__ == NewClass.__name__
assert obj == NewClass(a=1, b=2, c=3)
def validate_some_class_result(result: str) -> bool:
return result == "6"
def prepare_new_class() -> Tuple[str, str]:
temp_directory = tempfile.mkdtemp()
pkl_path = os.path.join(temp_directory, "my_class.pkl")
some_class = NewClass(a=1, b=2, c=3)
with open(pkl_path, "wb") as pkl_file:
cloudpickle.dump(some_class, pkl_file)
return pkl_path, temp_directory
class DefaultPackagerTester(PackagerTester):
"""
A tester for the `DefaultPackager`.
"""
PACKAGER_IN_TEST = DefaultPackager
TESTS = [
PackTest(
pack_handler="pack_some_class",
log_hint="my_result : result",
validation_function=validate_some_class_result,
),
UnpackTest(
prepare_input_function=prepare_new_class,
unpack_handler="unpack_some_class",
),
PackToUnpackTest(
pack_handler="pack_some_class",
log_hint="my_object",
expected_instructions={
"object_module_name": "tests",
**COMMON_OBJECT_INSTRUCTIONS,
},
unpack_handler="unpack_some_class",
),
]
|
5260203e8edecdb823ff8ae8f03acade16d72d80
|
7766096d6b2032562304e4b20190256ac2a5023e
|
/xbox/webapi/authentication/manager.py
|
1552e32a19663246e843ab42f215a8cb1ed8c350
|
[
"MIT"
] |
permissive
|
OpenXbox/xbox-webapi-python
|
c9d47be15cebc258ced7afde1ead70b5cd2d7a8c
|
398320c7777475959e4f8c6716297735be04aac4
|
refs/heads/master
| 2023-03-16T23:02:06.051438
| 2022-11-12T15:28:50
| 2022-11-12T15:28:50
| 125,562,423
| 166
| 55
|
MIT
| 2022-11-15T13:31:11
| 2018-03-16T19:43:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,743
|
py
|
manager.py
|
"""
Authentication Manager
Authenticate with Windows Live Server and Xbox Live.
"""
import logging
from typing import List, Optional
import httpx
from xbox.webapi.authentication.models import (
OAuth2TokenResponse,
XAUResponse,
XSTSResponse,
)
from xbox.webapi.common.exceptions import AuthenticationException
from xbox.webapi.common.signed_session import SignedSession
log = logging.getLogger("authentication")
DEFAULT_SCOPES = ["Xboxlive.signin", "Xboxlive.offline_access"]
class AuthenticationManager:
def __init__(
self,
client_session: SignedSession,
client_id: str,
client_secret: str,
redirect_uri: str,
scopes: Optional[List[str]] = None,
):
if not isinstance(client_session, (SignedSession, httpx.AsyncClient)):
raise DeprecationWarning(
"""Xbox WebAPI changed to use SignedSession (wrapped httpx.AsyncClient).
Please check the documentation"""
)
self.session: SignedSession = client_session
self._client_id: str = client_id
self._client_secret: str = client_secret
self._redirect_uri: str = redirect_uri
self._scopes: List[str] = scopes or DEFAULT_SCOPES
self.oauth: OAuth2TokenResponse = None
self.user_token: XAUResponse = None
self.xsts_token: XSTSResponse = None
def generate_authorization_url(self, state: Optional[str] = None) -> str:
"""Generate Windows Live Authorization URL."""
query_params = {
"client_id": self._client_id,
"response_type": "code",
"approval_prompt": "auto",
"scope": " ".join(self._scopes),
"redirect_uri": self._redirect_uri,
}
if state:
query_params["state"] = state
return str(
httpx.URL(
"https://login.live.com/oauth20_authorize.srf", params=query_params
)
)
async def request_tokens(self, authorization_code: str) -> None:
"""Request all tokens."""
self.oauth = await self.request_oauth_token(authorization_code)
self.user_token = await self.request_user_token()
self.xsts_token = await self.request_xsts_token()
async def refresh_tokens(self) -> None:
"""Refresh all tokens."""
if not (self.oauth and self.oauth.is_valid()):
self.oauth = await self.refresh_oauth_token()
if not (self.user_token and self.user_token.is_valid()):
self.user_token = await self.request_user_token()
if not (self.xsts_token and self.xsts_token.is_valid()):
self.xsts_token = await self.request_xsts_token()
async def request_oauth_token(self, authorization_code: str) -> OAuth2TokenResponse:
"""Request OAuth2 token."""
return await self._oauth2_token_request(
{
"grant_type": "authorization_code",
"code": authorization_code,
"scope": " ".join(self._scopes),
"redirect_uri": self._redirect_uri,
}
)
async def refresh_oauth_token(self) -> OAuth2TokenResponse:
"""Refresh OAuth2 token."""
return await self._oauth2_token_request(
{
"grant_type": "refresh_token",
"scope": " ".join(self._scopes),
"refresh_token": self.oauth.refresh_token,
}
)
async def _oauth2_token_request(self, data: dict) -> OAuth2TokenResponse:
"""Execute token requests."""
data["client_id"] = self._client_id
if self._client_secret:
data["client_secret"] = self._client_secret
resp = await self.session.post(
"https://login.live.com/oauth20_token.srf", data=data
)
resp.raise_for_status()
return OAuth2TokenResponse(**resp.json())
async def request_user_token(
self,
relying_party: str = "http://auth.xboxlive.com",
use_compact_ticket: bool = False,
) -> XAUResponse:
"""Authenticate via access token and receive user token."""
url = "https://user.auth.xboxlive.com/user/authenticate"
headers = {"x-xbl-contract-version": "1"}
data = {
"RelyingParty": relying_party,
"TokenType": "JWT",
"Properties": {
"AuthMethod": "RPS",
"SiteName": "user.auth.xboxlive.com",
"RpsTicket": self.oauth.access_token
if use_compact_ticket
else f"d={self.oauth.access_token}",
},
}
resp = await self.session.post(url, json=data, headers=headers)
resp.raise_for_status()
return XAUResponse(**resp.json())
async def request_xsts_token(
self, relying_party: str = "http://xboxlive.com"
) -> XSTSResponse:
"""Authorize via user token and receive final X token."""
url = "https://xsts.auth.xboxlive.com/xsts/authorize"
headers = {"x-xbl-contract-version": "1"}
data = {
"RelyingParty": relying_party,
"TokenType": "JWT",
"Properties": {
"UserTokens": [self.user_token.token],
"SandboxId": "RETAIL",
},
}
resp = await self.session.post(url, json=data, headers=headers)
if resp.status_code == 401: # if unauthorized
print(
"Failed to authorize you! Your password or username may be wrong or you are trying to use child account (< 18 years old)"
)
raise AuthenticationException()
resp.raise_for_status()
return XSTSResponse(**resp.json())
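# --- hedged usage sketch, not part of the library module above ---
# Shows the intended call order only; CLIENT_ID/CLIENT_SECRET/REDIRECT_URI and
# the pasted code are placeholders, and SignedSession is assumed here to be
# usable as an async context manager without constructor arguments.
async def _example_authentication_flow():
    async with SignedSession() as session:
        auth_mgr = AuthenticationManager(session, "CLIENT_ID", "CLIENT_SECRET", "REDIRECT_URI")
        print("Open this URL and sign in:", auth_mgr.generate_authorization_url())
        authorization_code = input("Paste the ?code= value from the redirect URL: ")
        await auth_mgr.request_tokens(authorization_code)
        print("Authenticated:", auth_mgr.xsts_token is not None and auth_mgr.xsts_token.is_valid())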
|
e1b7dd9f08924d980c418de8168ae5f885d34fab
|
ca10e5645aa2e8152d6219d31ac77d3ed50096c0
|
/bindings/python/capstone/riscv.py
|
ca09db61f337d5b892992a3bd9ee29fe5a2cbedc
|
[
"BSD-3-Clause",
"NCSA"
] |
permissive
|
capstone-engine/capstone
|
fc4f1b14eded800818f2ed64eafaf342e6046f9b
|
f036d2dbb6a9f0d1e0dc9c14b4f44878aeed260a
|
refs/heads/next
| 2023-09-02T14:38:15.356818
| 2023-08-30T03:13:17
| 2023-08-30T03:13:17
| 14,735,429
| 1,390
| 292
|
NOASSERTION
| 2023-09-14T20:47:20
| 2013-11-27T02:32:11
|
C
|
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
riscv.py
|
# Capstone Python bindings, by Nguyen Anh Quynh <aquynh@gmail.com>
import ctypes
from . import copy_ctypes_list
from .riscv_const import *
# define the API
class RISCVOpMem(ctypes.Structure):
_fields_ = (
('base', ctypes.c_uint),
('disp', ctypes.c_int64),
)
class RISCVOpValue(ctypes.Union):
_fields_ = (
('reg', ctypes.c_uint),
('imm', ctypes.c_int64),
('mem', RISCVOpMem),
)
class RISCVOp(ctypes.Structure):
_fields_ = (
('type', ctypes.c_uint),
('value', RISCVOpValue),
)
@property
def imm(self):
return self.value.imm
@property
def reg(self):
return self.value.reg
@property
def mem(self):
return self.value.mem
class CsRISCV(ctypes.Structure):
_fields_ = (
('need_effective_addr', ctypes.c_bool),
('op_count', ctypes.c_uint8),
('operands', RISCVOp * 8),
)
def get_arch_info(a):
return (a.need_effective_addr, copy_ctypes_list(a.operands[:a.op_count]))
|
8645a3235a53e541f664c2c9abc0e53eb2fe8a1a
|
3982e6daf88e453c726f6b39a081fc37ce15a08a
|
/discovery-provider/src/challenges/trending_challenge.py
|
1d6590ef0644086277623205298fd802e9bc221f
|
[
"Apache-2.0"
] |
permissive
|
AudiusProject/audius-protocol
|
45808e11082608ad5b76a425d287cb6d94a6dab0
|
7cf1d8e378520460d24a7cc8c29e9927c0944cb3
|
refs/heads/main
| 2023-08-09T10:34:28.850436
| 2023-08-09T04:28:17
| 2023-08-09T04:28:17
| 201,821,771
| 531
| 108
|
NOASSERTION
| 2023-09-14T21:27:52
| 2019-08-11T22:31:43
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,497
|
py
|
trending_challenge.py
|
import logging
from datetime import date, datetime, timedelta
from typing import Dict, List, Optional, Tuple
import pytz
from sqlalchemy import desc
from sqlalchemy.orm.session import Session
from src.challenges.challenge import (
ChallengeManager,
ChallengeUpdater,
FullEventMetadata,
)
from src.models.rewards.user_challenge import UserChallenge
from src.models.tracks.trending_result import TrendingResult
logger = logging.getLogger(__name__)
class TrendingChallengeUpdater(ChallengeUpdater):
"""Updates the trending track challenge."""
def update_user_challenges(
self,
session: Session,
event: str,
user_challenges: List[UserChallenge],
step_count: Optional[int],
event_metadatas: List[FullEventMetadata],
starting_block: Optional[int],
):
# Update the user_challenges
for user_challenge in user_challenges:
# Update completion
user_challenge.is_complete = True
def on_after_challenge_creation(self, session, metadatas: List[FullEventMetadata]):
trending_results = [
TrendingResult(
user_id=metadata["extra"]["user_id"],
id=metadata["extra"]["id"],
rank=metadata["extra"]["rank"],
type=metadata["extra"]["type"],
version=metadata["extra"]["version"],
week=metadata["extra"]["week"],
)
for metadata in metadatas
]
session.add_all(trending_results)
def generate_specifier(self, user_id: int, extra: Dict) -> str:
return f"{extra['week']}:{extra['rank']}"
trending_track_challenge_manager = ChallengeManager("tt", TrendingChallengeUpdater())
trending_underground_track_challenge_manager = ChallengeManager(
"tut", TrendingChallengeUpdater()
)
trending_playlist_challenge_manager = ChallengeManager("tp", TrendingChallengeUpdater())
def is_dst(zonename, dt):
"""Checks if is daylight savings time
During daylight savings, the clock moves forward one hr
"""
tz = pytz.timezone(zonename)
localized = pytz.utc.localize(dt)
return localized.astimezone(tz).dst() != timedelta(0)
def get_is_valid_timestamp(dt: datetime):
isFriday = dt.weekday() == 4
# Check that the timestamp falls between 12pm and 1pm PT (19-20 UTC during DST, 20-21 UTC otherwise)
add_hr = is_dst("America/Los_Angeles", dt)
min = 19 if add_hr else 20
max = 20 if add_hr else 21
isWithinHourMargin = dt.hour >= min and dt.hour < max
return isFriday and isWithinHourMargin
def should_trending_challenge_update(
session: Session, timestamp: int
) -> Tuple[bool, Optional[date]]:
"""Checks if the timestamp is after a week and there is no pending trending update
Returns a tuple of boolean if the challenge should be updated, and if it's set to true, the date
"""
dt = datetime.fromtimestamp(timestamp)
is_valid_timestamp = get_is_valid_timestamp(dt)
if not is_valid_timestamp:
return (False, None)
# DB query for most recent db row of trending's date
# using that, figure out new date threshold -> next friday at noon
most_recent_user_challenge = (
session.query(TrendingResult.week).order_by(desc(TrendingResult.week)).first()
)
if most_recent_user_challenge is None:
# no trending result recorded yet, so the challenge should update for this week's date
return (True, dt.date())
week = most_recent_user_challenge[0]
if week == dt.date():
return (False, None)
return (True, dt.date())
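# --- hedged worked example, not part of the Audius module above ---
# Illustrates the UTC window used in get_is_valid_timestamp: noon PT maps to
# 19:00 UTC during daylight saving time and 20:00 UTC otherwise. The dates
# below are illustrative Fridays from 2022.
if __name__ == "__main__":
    assert get_is_valid_timestamp(datetime(2022, 7, 1, 19, 30))       # Friday, PDT: 12:30 PT
    assert get_is_valid_timestamp(datetime(2022, 12, 2, 20, 30))      # Friday, PST: 12:30 PT
    assert not get_is_valid_timestamp(datetime(2022, 7, 2, 19, 30))   # Saturday
    assert not get_is_valid_timestamp(datetime(2022, 7, 1, 18, 30))   # Friday, but 11:30 PT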
|
751a5e1ad6ca056c402dd4d004401a2069e643bd
|
a9f5549d90e49c62c1198a3a54a70370e6f7256c
|
/setup.py
|
49365bd706ed4d13ecb718b8bb444eb31f48a97b
|
[
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
Blosc/python-blosc
|
cbe07af0b3c52ee4dcf0c43d0de5b83c7cd117e4
|
99128d6d3f988546e0ca03b3bc431eb9d377bec4
|
refs/heads/main
| 2023-07-01T10:42:32.301766
| 2023-05-01T11:38:40
| 2023-05-01T11:38:40
| 951,194
| 288
| 73
|
NOASSERTION
| 2023-09-04T19:55:16
| 2010-09-30T12:56:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,686
|
py
|
setup.py
|
########################################################################
#
# License: BSD 3-clause
# Created: September 22, 2010
# Author: The Blosc development team
#
########################################################################
# flake8: noqa
import os
import sys
from skbuild import setup
from textwrap import dedent
if __name__ == '__main__':
try:
import cpuinfo
cpu_info = cpuinfo.get_cpu_info()
except Exception:
# newer cpuinfo versions fail to import on unsupported architectures
cpu_info = None
########### Check versions ##########
def exit_with_error(message):
print('ERROR: %s' % message)
sys.exit(1)
########### End of checks ##########
# Read the long_description from README.rst
with open('README.rst') as f:
long_description = f.read()
# Blosc version
with open('VERSION') as f:
VERSION = f.read().strip()
# Create the version.py file
with open('blosc/version.py', 'w') as f:
f.write('__version__ = "%s"\n' % VERSION)
def cmake_bool(cond):
return 'ON' if cond else 'OFF'
classifiers = dedent("""\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Information Technology
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Programming Language :: Python :: 3
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3 :: Only
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Archiving :: Compression
Operating System :: Microsoft :: Windows
Operating System :: Unix
""")
setup(name = "blosc",
version = VERSION,
description = 'Blosc data compressor',
long_description = long_description,
classifiers = [c for c in classifiers.split("\n") if c],
python_requires=">=3.8",
author = 'The Blosc development team',
author_email = 'blosc@blosc.org',
maintainer = 'The Blosc development team',
maintainer_email = 'blosc@blosc.org',
url = 'https://github.com/blosc/python-blosc',
license = 'https://opensource.org/licenses/BSD-3-Clause',
platforms = ['any'],
cmake_args = (
['-DUSE_SYSTEM_BLOSC:BOOL=ON'] if int(os.environ.get('USE_SYSTEM_BLOSC', '0'))
else [
'-DUSE_SYSTEM_BLOSC:BOOL=OFF',
'-DDEACTIVATE_SSE2:BOOL=%s' % cmake_bool(('DISABLE_BLOSC_SSE2' in os.environ) or (cpu_info is None) or ('sse2' not in cpu_info['flags'])),
'-DDEACTIVATE_AVX2:BOOL=%s' % cmake_bool(('DISABLE_BLOSC_AVX2' in os.environ) or (cpu_info is None) or ('avx2' not in cpu_info['flags'])),
'-DDEACTIVATE_LZ4:BOOL=%s' % cmake_bool(not int(os.environ.get('INCLUDE_LZ4', '1'))),
# Snappy is disabled by default
'-DDEACTIVATE_SNAPPY:BOOL=%s' % cmake_bool(not int(os.environ.get('INCLUDE_SNAPPY', '0'))),
'-DDEACTIVATE_ZLIB:BOOL=%s' % cmake_bool(not int(os.environ.get('INCLUDE_ZLIB', '1'))),
'-DDEACTIVATE_ZSTD:BOOL=%s' % cmake_bool(not int(os.environ.get('INCLUDE_ZSTD', '1'))),
]),
setup_requires=['scikit-build'],
tests_require=['numpy', 'psutil'],
packages = ['blosc'],
)
elif __name__ == '__mp_main__':
# This occurs from `cpuinfo 4.0.0` using multiprocessing to interrogate the
# CPUID flags
# https://github.com/workhorsy/py-cpuinfo/issues/108
pass
|
c16531f3d2947cfc49eba48520f2d9c7dcaf6f30
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/securityinsight/azext_sentinel/aaz/latest/sentinel/entity_query/_create.py
|
edba2948fc2bdddd4b0528db7413fbebe72ee54e
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 16,545
|
py
|
_create.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"sentinel entity-query create",
is_experimental=True,
)
class Create(AAZCommand):
"""Create the entity query.
"""
_aaz_info = {
"version": "2022-06-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationalinsights/workspaces/{}/providers/microsoft.securityinsights/entityqueries/{}", "2022-06-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.entity_query_id = AAZStrArg(
options=["-n", "--name", "--entity-query-id"],
help="entity query ID",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.workspace_name = AAZStrArg(
options=["-w", "--workspace-name"],
help="The name of the workspace.",
required=True,
is_experimental=True,
id_part="name",
)
# define Arg Group "EntityQuery"
_args_schema = cls._args_schema
_args_schema.activity = AAZObjectArg(
options=["--activity"],
arg_group="EntityQuery",
)
_args_schema.etag = AAZStrArg(
options=["--etag"],
arg_group="EntityQuery",
help="Etag of the azure resource",
)
activity = cls._args_schema.activity
activity.content = AAZStrArg(
options=["content"],
help="The entity query content to display in timeline",
)
activity.description = AAZStrArg(
options=["description"],
help="The entity query description",
)
activity.enabled = AAZBoolArg(
options=["enabled"],
help="Determines whether this activity is enabled or disabled.",
)
activity.entities_filter = AAZDictArg(
options=["entities-filter"],
help="The query applied only to entities matching to all filters",
)
activity.input_entity_type = AAZStrArg(
options=["input-entity-type"],
help="The type of the query's source entity",
enum={"Account": "Account", "AzureResource": "AzureResource", "CloudApplication": "CloudApplication", "DNS": "DNS", "File": "File", "FileHash": "FileHash", "Host": "Host", "HuntingBookmark": "HuntingBookmark", "IP": "IP", "IoTDevice": "IoTDevice", "MailCluster": "MailCluster", "MailMessage": "MailMessage", "Mailbox": "Mailbox", "Malware": "Malware", "Process": "Process", "RegistryKey": "RegistryKey", "RegistryValue": "RegistryValue", "SecurityAlert": "SecurityAlert", "SecurityGroup": "SecurityGroup", "SubmissionMail": "SubmissionMail", "URL": "URL"},
)
activity.query_definitions = AAZObjectArg(
options=["query-definitions"],
help="The Activity query definitions",
)
activity.required_input_fields_sets = AAZListArg(
options=["required-input-fields-sets"],
help="List of the fields of the source entity that are required to run the query",
)
activity.template_name = AAZStrArg(
options=["template-name"],
help="The template id this activity was created from",
)
activity.title = AAZStrArg(
options=["title"],
help="The entity query title",
)
entities_filter = cls._args_schema.activity.entities_filter
entities_filter.Element = AAZListArg()
_element = cls._args_schema.activity.entities_filter.Element
_element.Element = AAZStrArg()
query_definitions = cls._args_schema.activity.query_definitions
query_definitions.query = AAZStrArg(
options=["query"],
help="The Activity query to run on a given entity",
)
required_input_fields_sets = cls._args_schema.activity.required_input_fields_sets
required_input_fields_sets.Element = AAZListArg()
_element = cls._args_schema.activity.required_input_fields_sets.Element
_element.Element = AAZStrArg()
return cls._args_schema
def _execute_operations(self):
self.EntityQueriesCreateOrUpdate(ctx=self.ctx)()
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class EntityQueriesCreateOrUpdate(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200, 201]:
return self.on_200_201(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/entityQueries/{entityQueryId}",
**self.url_parameters
)
@property
def method(self):
return "PUT"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"entityQueryId", self.ctx.args.entity_query_id,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"workspaceName", self.ctx.args.workspace_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-06-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("etag", AAZStrType, ".etag")
_builder.set_const("kind", "Activity", AAZStrType, ".activity", typ_kwargs={"flags": {"required": True}})
_builder.discriminate_by("kind", "Activity")
disc_activity = _builder.get("{kind:Activity}")
if disc_activity is not None:
disc_activity.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}})
properties = _builder.get("{kind:Activity}.properties")
if properties is not None:
properties.set_prop("content", AAZStrType, ".activity.content")
properties.set_prop("description", AAZStrType, ".activity.description")
properties.set_prop("enabled", AAZBoolType, ".activity.enabled")
properties.set_prop("entitiesFilter", AAZDictType, ".activity.entities_filter")
properties.set_prop("inputEntityType", AAZStrType, ".activity.input_entity_type")
properties.set_prop("queryDefinitions", AAZObjectType, ".activity.query_definitions")
properties.set_prop("requiredInputFieldsSets", AAZListType, ".activity.required_input_fields_sets")
properties.set_prop("templateName", AAZStrType, ".activity.template_name")
properties.set_prop("title", AAZStrType, ".activity.title")
entities_filter = _builder.get("{kind:Activity}.properties.entitiesFilter")
if entities_filter is not None:
entities_filter.set_elements(AAZListType)
_elements = _builder.get("{kind:Activity}.properties.entitiesFilter{}")
if _elements is not None:
_elements.set_elements(AAZStrType, ".")
query_definitions = _builder.get("{kind:Activity}.properties.queryDefinitions")
if query_definitions is not None:
query_definitions.set_prop("query", AAZStrType, ".query")
required_input_fields_sets = _builder.get("{kind:Activity}.properties.requiredInputFieldsSets")
if required_input_fields_sets is not None:
required_input_fields_sets.set_elements(AAZListType)
_elements = _builder.get("{kind:Activity}.properties.requiredInputFieldsSets[]")
if _elements is not None:
_elements.set_elements(AAZStrType, ".")
return self.serialize_content(_content_value)
def on_200_201(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200_201
)
_schema_on_200_201 = None
@classmethod
def _build_schema_on_200_201(cls):
if cls._schema_on_200_201 is not None:
return cls._schema_on_200_201
cls._schema_on_200_201 = AAZObjectType()
_schema_on_200_201 = cls._schema_on_200_201
_schema_on_200_201.etag = AAZStrType()
_schema_on_200_201.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200_201.kind = AAZStrType(
flags={"required": True},
)
_schema_on_200_201.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200_201.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200_201.type = AAZStrType(
flags={"read_only": True},
)
system_data = cls._schema_on_200_201.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
flags={"read_only": True},
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
flags={"read_only": True},
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
flags={"read_only": True},
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
flags={"read_only": True},
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
flags={"read_only": True},
)
disc_activity = cls._schema_on_200_201.discriminate_by("kind", "Activity")
disc_activity.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200_201.discriminate_by("kind", "Activity").properties
properties.content = AAZStrType()
properties.created_time_utc = AAZStrType(
serialized_name="createdTimeUtc",
flags={"read_only": True},
)
properties.description = AAZStrType()
properties.enabled = AAZBoolType()
properties.entities_filter = AAZDictType(
serialized_name="entitiesFilter",
)
properties.input_entity_type = AAZStrType(
serialized_name="inputEntityType",
)
properties.last_modified_time_utc = AAZStrType(
serialized_name="lastModifiedTimeUtc",
flags={"read_only": True},
)
properties.query_definitions = AAZObjectType(
serialized_name="queryDefinitions",
)
properties.required_input_fields_sets = AAZListType(
serialized_name="requiredInputFieldsSets",
)
properties.template_name = AAZStrType(
serialized_name="templateName",
)
properties.title = AAZStrType()
entities_filter = cls._schema_on_200_201.discriminate_by("kind", "Activity").properties.entities_filter
entities_filter.Element = AAZListType()
_element = cls._schema_on_200_201.discriminate_by("kind", "Activity").properties.entities_filter.Element
_element.Element = AAZStrType()
query_definitions = cls._schema_on_200_201.discriminate_by("kind", "Activity").properties.query_definitions
query_definitions.query = AAZStrType()
required_input_fields_sets = cls._schema_on_200_201.discriminate_by("kind", "Activity").properties.required_input_fields_sets
required_input_fields_sets.Element = AAZListType()
_element = cls._schema_on_200_201.discriminate_by("kind", "Activity").properties.required_input_fields_sets.Element
_element.Element = AAZStrType()
disc_expansion = cls._schema_on_200_201.discriminate_by("kind", "Expansion")
disc_expansion.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200_201.discriminate_by("kind", "Expansion").properties
properties.data_sources = AAZListType(
serialized_name="dataSources",
)
properties.display_name = AAZStrType(
serialized_name="displayName",
)
properties.input_entity_type = AAZStrType(
serialized_name="inputEntityType",
)
properties.input_fields = AAZListType(
serialized_name="inputFields",
)
properties.output_entity_types = AAZListType(
serialized_name="outputEntityTypes",
)
properties.query_template = AAZStrType(
serialized_name="queryTemplate",
)
data_sources = cls._schema_on_200_201.discriminate_by("kind", "Expansion").properties.data_sources
data_sources.Element = AAZStrType()
input_fields = cls._schema_on_200_201.discriminate_by("kind", "Expansion").properties.input_fields
input_fields.Element = AAZStrType()
output_entity_types = cls._schema_on_200_201.discriminate_by("kind", "Expansion").properties.output_entity_types
output_entity_types.Element = AAZStrType()
return cls._schema_on_200_201
__all__ = ["Create"]
|
c64f206429a490464a2dd97478119f51af1f3797
|
9e204cdb8d32e3854bb96c8d933f4a5f3a7d00f3
|
/pyredner_tensorflow/object.py
|
e2d81504f51ad20298a74a5fa27719447f22f4b3
|
[
"MIT"
] |
permissive
|
BachiLi/redner
|
81ca08f838cf00e1a31758f3f06a82559a1de7d8
|
f355e8c445b2cba1dd7d75b8e137d736c7270520
|
refs/heads/master
| 2023-08-19T03:16:49.188230
| 2022-03-28T18:57:01
| 2022-03-28T18:57:01
| 157,027,341
| 1,326
| 147
|
MIT
| 2022-08-19T13:09:03
| 2018-11-10T22:32:27
|
NASL
|
UTF-8
|
Python
| false
| false
| 3,681
|
py
|
object.py
|
import pyredner_tensorflow as pyredner
import tensorflow as tf
from typing import Optional
class Object:
"""
    Object combines geometry, material, and lighting information
    and aggregates them in a single class. This is a convenient class
    for constructing redner scenes.
    redner supports only triangle meshes for now. It stores a pool of
    vertices and accesses the pool using integer indices. Sometimes
    two vertices can have the same 3D position but different texture
    coordinates, because UV mapping creates seams and needs to duplicate
    vertices. In this case we can use an additional "uv_indices" array
    to access the uv pool.
Args
====
vertices: tf.Tensor
3D position of vertices
float32 tensor with size num_vertices x 3
indices: tf.Tensor
vertex indices of triangle faces.
int32 tensor with size num_triangles x 3
material: pyredner.Material
light_intensity: Optional[tf.Tensor]
make this object an area light
float32 tensor with size 3
light_two_sided: boolean
Does the light emit from two sides of the shape?
uvs: Optional[tf.Tensor]:
optional texture coordinates.
float32 tensor with size num_uvs x 2
        doesn't need to be the same size as vertices if uv_indices is None
normals: Optional[tf.Tensor]
shading normal
float32 tensor with size num_normals x 3
        doesn't need to be the same size as vertices if normal_indices is None
uv_indices: Optional[tf.Tensor]
overrides indices when accessing uv coordinates
int32 tensor with size num_uvs x 3
normal_indices: Optional[tf.Tensor]
overrides indices when accessing shading normals
int32 tensor with size num_normals x 3
colors: Optional[tf.Tensor]
optional per-vertex color
float32 tensor with size num_vertices x 3
    directly_visible: boolean
        optional setting that controls whether the object is directly
        visible to the camera during rendering.
"""
def __init__(self,
vertices: tf.Tensor,
indices: tf.Tensor,
material: pyredner.Material,
light_intensity: Optional[tf.Tensor] = None,
light_two_sided: bool = False,
uvs: Optional[tf.Tensor] = None,
normals: Optional[tf.Tensor] = None,
uv_indices: Optional[tf.Tensor] = None,
normal_indices: Optional[tf.Tensor] = None,
colors: Optional[tf.Tensor] = None,
directly_visible: bool = True):
assert(vertices.dtype == tf.float32)
assert(indices.dtype == tf.int32)
if uvs is not None:
assert(uvs.dtype == tf.float32)
if normals is not None:
assert(normals.dtype == tf.float32)
if uv_indices is not None:
assert(uv_indices.dtype == tf.int32)
if normal_indices is not None:
assert(normal_indices.dtype == tf.int32)
if colors is not None:
assert(colors.dtype == tf.float32)
self.vertices = vertices
self.indices = indices
self.uvs = uvs
self.normals = normals
self.uv_indices = uv_indices
self.normal_indices = normal_indices
self.colors = colors
self.material = material
self.light_intensity = light_intensity
self.light_two_sided = light_two_sided
self.directly_visible = directly_visible
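# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example that builds an Object from a single triangle. It assumes
# pyredner.Material accepts a plain float32 tensor for `diffuse_reflectance`;
# adjust to your pyredner version if that assumption does not hold.
def _example_triangle_object():
    triangle = Object(
        vertices=tf.constant([[0.0, 0.0, 0.0],
                              [1.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0]], dtype=tf.float32),
        indices=tf.constant([[0, 1, 2]], dtype=tf.int32),
        material=pyredner.Material(
            diffuse_reflectance=tf.constant([0.5, 0.5, 0.5], dtype=tf.float32)))
    return triangle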
|
0c8b04522cf0d6194f6afc1849d3390528e7aabf
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/runtime/types/event_handler.py
|
57576ab5085362b7135f94ff2c2c827adac922af
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
event_handler.py
|
class EventHandler:
def __init__(self, once=False):
self._listeners = []
self._once = once
def __contains__(self, e):
return e in self._listeners
def __iter__(self):
return iter(self._listeners)
def __iadd__(self, listener):
self._listeners.append(listener)
return self
def __isub__(self, listener):
self._listeners.remove(listener)
return self
def __len__(self):
return len(self._listeners)
    def notify(self, *args, **kwargs):
        # Iterate over a copy so that one-shot listeners can be removed
        # from the underlying list without skipping the next listener.
        for listener in list(self._listeners):
            if self._once:
                self._listeners.remove(listener)
            listener(*args, **kwargs)
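# --- Illustrative usage sketch (not part of the original module) ---
# Registers a listener with `+=`, fires it with notify(), and shows the
# one-shot behaviour enabled by `once=True`.
if __name__ == '__main__':
    def on_event(message):
        print('received:', message)
    handler = EventHandler(once=True)
    handler += on_event
    handler.notify('first')    # listener fires and is removed
    handler.notify('second')   # no output; the listener was one-shot
    print(len(handler))        # 0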
|
ee86201f74f4f73b8300f69dbca06d3d92c7cffb
|
8880226d2ca1c9448c44b3e9f21226a58e61ac93
|
/awacs/chatbot.py
|
a8dc71a7b973a28106ea794750a3a20892f118ee
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
cloudtools/awacs
|
2f82958ccc7ba2177492c29c706a5737f19dd2d1
|
c449a9637f01c26e73b827a9f8d5cc7715bbbea2
|
refs/heads/main
| 2023-08-31T00:58:28.636568
| 2023-08-28T05:13:01
| 2023-08-28T05:13:01
| 9,062,692
| 385
| 107
|
BSD-2-Clause
| 2023-08-13T23:21:39
| 2013-03-27T20:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
chatbot.py
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Chatbot"
prefix = "chatbot"
class Action(BaseAction):
def __init__(self, action: Optional[str] = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CreateChimeWebhookConfiguration = Action("CreateChimeWebhookConfiguration")
CreateMicrosoftTeamsChannelConfiguration = Action(
"CreateMicrosoftTeamsChannelConfiguration"
)
CreateSlackChannelConfiguration = Action("CreateSlackChannelConfiguration")
DeleteChimeWebhookConfiguration = Action("DeleteChimeWebhookConfiguration")
DeleteMicrosoftTeamsChannelConfiguration = Action(
"DeleteMicrosoftTeamsChannelConfiguration"
)
DeleteMicrosoftTeamsConfiguredTeam = Action("DeleteMicrosoftTeamsConfiguredTeam")
DeleteMicrosoftTeamsUserIdentity = Action("DeleteMicrosoftTeamsUserIdentity")
DeleteSlackChannelConfiguration = Action("DeleteSlackChannelConfiguration")
DeleteSlackUserIdentity = Action("DeleteSlackUserIdentity")
DeleteSlackWorkspaceAuthorization = Action("DeleteSlackWorkspaceAuthorization")
DescribeChimeWebhookConfigurations = Action("DescribeChimeWebhookConfigurations")
DescribeSlackChannelConfigurations = Action("DescribeSlackChannelConfigurations")
DescribeSlackChannels = Action("DescribeSlackChannels")
DescribeSlackUserIdentities = Action("DescribeSlackUserIdentities")
DescribeSlackWorkspaces = Action("DescribeSlackWorkspaces")
GetAccountPreferences = Action("GetAccountPreferences")
GetMicrosoftTeamsChannelConfiguration = Action("GetMicrosoftTeamsChannelConfiguration")
GetMicrosoftTeamsOauthParameters = Action("GetMicrosoftTeamsOauthParameters")
GetSlackOauthParameters = Action("GetSlackOauthParameters")
ListMicrosoftTeamsChannelConfigurations = Action(
"ListMicrosoftTeamsChannelConfigurations"
)
ListMicrosoftTeamsConfiguredTeams = Action("ListMicrosoftTeamsConfiguredTeams")
ListMicrosoftTeamsUserIdentities = Action("ListMicrosoftTeamsUserIdentities")
RedeemMicrosoftTeamsOauthCode = Action("RedeemMicrosoftTeamsOauthCode")
RedeemSlackOauthCode = Action("RedeemSlackOauthCode")
UpdateAccountPreferences = Action("UpdateAccountPreferences")
UpdateChimeWebhookConfiguration = Action("UpdateChimeWebhookConfiguration")
UpdateMicrosoftTeamsChannelConfiguration = Action(
"UpdateMicrosoftTeamsChannelConfiguration"
)
UpdateSlackChannelConfiguration = Action("UpdateSlackChannelConfiguration")
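# --- Illustrative usage sketch (not part of the original module) ---
# Builds a small IAM policy document that allows the read-only Slack describe
# actions defined above, using the awacs.aws helpers. The Sid and the "*"
# resource are illustrative placeholders.
def _example_chatbot_policy():
    from awacs.aws import Allow, PolicyDocument, Statement
    policy = PolicyDocument(
        Version="2012-10-17",
        Statement=[
            Statement(
                Sid="AllowChatbotRead",
                Effect=Allow,
                Action=[DescribeSlackChannelConfigurations, DescribeSlackChannels],
                Resource=["*"],
            )
        ],
    )
    return policy.to_json()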
|
bbd55ae575bbc04efe5ebf28427a78800f65ea55
|
de2d6e6cbb65729255f288f3b3bf1c065c538573
|
/running_modes/configurations/automated_curriculum_learning/automated_curriculum_learning_input_configuration.py
|
8257e8863db8f5cff4e1f9745d9f690988b9252a
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
MolecularAI/Reinvent
|
2edf7736b77107f3c04a6ba7ad8b8a3b39691f31
|
b7324d222a49d18b08335a01649abdb0ac66a734
|
refs/heads/master
| 2022-08-09T00:09:11.752865
| 2022-06-01T15:17:01
| 2022-06-01T15:17:01
| 248,080,601
| 306
| 110
|
Apache-2.0
| 2023-03-05T15:55:52
| 2020-03-17T21:54:26
|
Python
|
UTF-8
|
Python
| false
| false
| 644
|
py
|
automated_curriculum_learning_input_configuration.py
|
from running_modes.configurations.automated_curriculum_learning.base_configuration import BaseConfiguration
from running_modes.configurations.automated_curriculum_learning.curriculum_strategy_input_configuration import \
CurriculumStrategyInputConfiguration
from running_modes.configurations.automated_curriculum_learning.prodcution_strategy_input_configuration import \
ProductionStrategyInputConfiguration
class AutomatedCurriculumLearningInputConfiguration(BaseConfiguration):
agent: str
prior: str
curriculum_strategy: CurriculumStrategyInputConfiguration
production_strategy: ProductionStrategyInputConfiguration
|
67d79c9e6b39dae1645ac0156837b017770461c6
|
4a84dc50411dc2a423ba7408318522b7c26c9487
|
/tests/unit/test_soapui2yaml.py
|
7e72e8b9243f1fbf48c13561170af880e1e314cc
|
[
"Apache-2.0"
] |
permissive
|
Blazemeter/taurus
|
ffdef9b433d6754e5395f6b42fc6838ef817a763
|
81a114bb10ae0b0827079bb7acc2337db8f0e68d
|
refs/heads/master
| 2023-08-19T00:19:12.036869
| 2023-08-14T11:45:09
| 2023-08-14T11:45:09
| 30,940,272
| 1,976
| 555
|
Apache-2.0
| 2023-09-12T09:37:02
| 2015-02-17T22:05:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,550
|
py
|
test_soapui2yaml.py
|
import yaml
from bzt.soapui2yaml import SoapUI2YAML
from tests.unit import BZTestCase, RESOURCES_DIR, EngineEmul
class FakeOptions(object):
def __init__(self, verbose=True, file_name=None, test_case=None, quiet=False, json=False, log=False):
self.verbose = verbose
self.file_name = file_name
self.test_case = test_case
self.quiet = quiet
self.json = json
self.log = log
class TestConverter(BZTestCase):
def setUp(self):
super(TestConverter, self).setUp()
self.engine = EngineEmul()
def _get_tmp(self, prefix='test', suffix='.yml'):
return self.engine.create_artifact(prefix, suffix)
def configure(self, options, source):
self.tool = SoapUI2YAML(options, source)
self.clean_log()
def test_convert(self):
source = RESOURCES_DIR + "soapui/project.xml"
result = self._get_tmp()
options = FakeOptions(file_name=result, test_case="index")
self.configure(options, source)
self.tool.process()
actual = yaml.full_load(open(result).read())
expected = yaml.full_load(open(RESOURCES_DIR + "soapui/project.xml.yml").read())
self.assertEqual(actual, expected)
def test_flickr(self):
source = RESOURCES_DIR + "soapui/flickr-sample.xml"
result = self._get_tmp()
options = FakeOptions(file_name=result)
self.configure(options, source)
self.tool.process()
actual = yaml.full_load(open(result).read())
expected = yaml.full_load(open(RESOURCES_DIR + "soapui/flickr-sample.xml.yml").read())
self.assertEqual(actual, expected)
def test_egalaxy(self):
source = RESOURCES_DIR + "soapui/egalaxy.xml"
result = self._get_tmp()
options = FakeOptions(file_name=result)
self.configure(options, source)
self.tool.process()
actual = yaml.full_load(open(result).read())
expected = yaml.full_load(open(RESOURCES_DIR + "soapui/egalaxy.xml.yml").read())
self.assertEqual(actual, expected)
def test_smart(self):
source = RESOURCES_DIR + "soapui/smart.xml"
result = self._get_tmp()
options = FakeOptions(file_name=result)
self.configure(options, source)
self.tool.process()
self.configure(options, source)
self.tool.process()
actual = yaml.full_load(open(result).read())
expected = yaml.full_load(open(RESOURCES_DIR + "soapui/smart.xml.yml").read())
self.assertEqual(actual, expected)
|
afe8048ed2a1475466a6c8c1664652548788ff88
|
2f57b9476832bc613ca7428353f9f2ddde89b4f3
|
/impala/hiveserver2.py
|
73f4555ff2a0f24dc65dc31aa8b69c45cd059585
|
[
"Apache-2.0"
] |
permissive
|
cloudera/impyla
|
121f9cf9b4f576046ce60018d022f37b96b712f0
|
b8272c61dd9023019b65666b923469639edb3818
|
refs/heads/master
| 2023-08-28T01:24:53.013395
| 2023-08-01T21:19:09
| 2023-08-02T16:31:31
| 18,780,981
| 720
| 277
|
Apache-2.0
| 2023-08-02T00:28:27
| 2014-04-14T23:52:07
|
Python
|
UTF-8
|
Python
| false
| false
| 59,649
|
py
|
hiveserver2.py
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import getpass
import re
import socket
import datetime
import operator
import six
import sys
import time
from bitarray import bitarray
from six.moves import range
from thrift.transport.TTransport import TTransportException
from thrift.Thrift import TApplicationException
from thrift.protocol.TBinaryProtocol import TBinaryProtocolAccelerated
from impala._thrift_gen.TCLIService.ttypes import (
TOpenSessionReq, TFetchResultsReq, TCloseSessionReq,
TExecuteStatementReq, TGetInfoReq, TGetInfoType, TTypeId,
TFetchOrientation, TGetResultSetMetadataReq, TStatusCode,
TGetColumnsReq, TGetSchemasReq, TGetTablesReq, TGetFunctionsReq,
TGetOperationStatusReq, TOperationState, TCancelOperationReq,
TCloseOperationReq, TGetLogReq, TProtocolVersion)
from impala._thrift_gen.ImpalaService.ImpalaHiveServer2Service import (
TGetRuntimeProfileReq, TGetExecSummaryReq)
from impala._thrift_api import (
get_socket, get_http_transport, get_transport, ThriftClient)
from impala._thrift_gen.RuntimeProfile.ttypes import TRuntimeProfileFormat
# TExecStats is referenced by build_summary_table() below.
from impala._thrift_gen.ExecStats.ttypes import TExecStats
from impala.compat import (Decimal, _xrange as xrange)
from impala.error import (NotSupportedError, OperationalError,
ProgrammingError, HiveServer2Error, HttpError)
from impala.interface import Connection, Cursor, _bind_parameters
from impala.util import get_logger_and_init_null
log = get_logger_and_init_null(__name__)
V6_VERSION = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6
class HiveServer2Connection(Connection):
# PEP 249
# HiveServer2Connection objects are associated with a TCLIService.Client
# thrift service
# it's instantiated with an alive TCLIService.Client
def __init__(self, service, default_db=None):
log.debug('HiveServer2Connection(service=%s, default_db=%s)', service,
default_db)
self.service = service
self.default_db = default_db
def close(self):
"""Close the session and the Thrift transport."""
# PEP 249
log.debug('Closing HS2 connection')
self.service.close()
def reconnect(self):
self.service.reconnect()
def commit(self):
"""Impala doesn't support transactions; does nothing."""
# PEP 249
pass
def rollback(self):
"""Impala doesn't support transactions; raises NotSupportedError"""
# PEP 249
raise NotSupportedError
def cursor(self, user=None, configuration=None, convert_types=True,
dictify=False, fetch_error=True):
"""Get a cursor from the HiveServer2 (HS2) connection.
Parameters
----------
user : str, optional
configuration : dict of str keys and values, optional
Configuration overlay for the HS2 session.
convert_types : bool, optional
When `False`, timestamps and decimal values will not be converted
to Python `datetime` and `Decimal` values. (These conversions are
expensive.) Only applies when using HS2 protocol versions > 6.
dictify : bool, optional
When `True` cursor will return key value pairs instead of rows.
fetch_error : bool, optional
In versions of impala prior to 2.7.0, when an operation fails and
the impalad returns an error state, the error message is not always
returned. In these cases the error message can be retrieved by a
subsequent fetch rpc call but this has a side effect of invalidating
the query handle and causing any further operations against it to
fail. e.g. calling log() or profile().
When set to `True` impyla will attempt to fetch the error message.
            When set to `False`, impyla will not attempt to fetch the error
            message with a fetch call. In this case the query handle remains
            valid and impyla will raise an exception with a message of
            "Operation is in ERROR_STATE".
            The default is `True`.
Returns
-------
HiveServer2Cursor
A `Cursor` object (DB API 2.0-compliant).
"""
# PEP 249
log.debug('Getting a cursor (Impala session)')
if user is None:
user = getpass.getuser()
log.debug('.cursor(): getting new session_handle')
session = self.service.open_session(user, configuration)
log.debug('HiveServer2Cursor(service=%s, session_handle=%s, '
'default_config=%s, hs2_protocol_version=%s)',
self.service, session.handle,
session.config, session.hs2_protocol_version)
cursor_class = HiveServer2DictCursor if dictify else HiveServer2Cursor
cursor = cursor_class(session, convert_types=convert_types,
fetch_error=fetch_error)
if self.default_db is not None:
log.info('Using database %s as default', self.default_db)
cursor.execute('USE %s' % self.default_db)
return cursor
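# --- Illustrative sketch (not part of the original module) ---
# Typical use of a HiveServer2Connection: obtain a cursor, run a query and
# fetch all rows. The table name is a hypothetical placeholder.
def _example_cursor_usage(connection):
    cursor = connection.cursor(convert_types=True)
    cursor.execute('SELECT * FROM example_table LIMIT 10')
    rows = cursor.fetchall()
    cursor.close()
    return rows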
class HiveServer2Cursor(Cursor):
"""The DB API 2.0 Cursor object.
See the PEP 249 specification for more details.
"""
# PEP 249
# HiveServer2Cursor objects are associated with a Session
# they are instantiated with alive session_handles
def __init__(self, session, convert_types=True, fetch_error=True):
self.session = session
self.convert_types = convert_types
self.fetch_error = fetch_error
self._last_operation = None
self._last_operation_string = None
self._last_operation_active = False
self._last_operation_finished = False
self._buffersize = None
self._buffer = Batch() # zero-length
# initial values, per PEP 249
self._description = None
self._rowcount = -1
self._closed = False
def __del__(self):
if self._closed:
return
try:
self.close_operation()
except Exception:
pass
try:
self.session.close()
except Exception:
pass
@property
def description(self):
# PEP 249
if self._description is None and self.has_result_set:
log.debug('description=None has_result_set=True => getting schema')
schema = self._last_operation.get_result_schema()
self._description = schema
return self._description
@property
def rowcount(self):
# PEP 249
# Note that _rowcount will be always -1 as we do not know the number of rows
# until all rows are fetched from the query.
return self._rowcount
@property
def rowcounts(self):
        # Workaround to get the number of rows modified by INSERT/UPDATE/DELETE statements
modifiedRows, errorRows = -1, -1
if self._last_operation_active:
logList = self.get_profile().split('\n')
resultDict = {}
subs = ['NumModifiedRows', 'NumRowErrors']
resultSet = [s for s in logList if any(item in s for item in subs)]
if resultSet:
for items in resultSet:
key, value = items.split(':')
key, value = key.strip(), value.strip()
resultDict[key] = value
modifiedRows = int(resultDict.get('NumModifiedRows', -1))
errorRows = int(resultDict.get('NumRowErrors', -1))
return (modifiedRows, errorRows)
@property
def lastrowid(self):
# PEP 249
return None
@property
def query_string(self):
return self._last_operation_string
def get_arraysize(self):
# PEP 249
return self._buffersize if self._buffersize else 1
def set_arraysize(self, arraysize):
# PEP 249
log.debug('set_arraysize: arraysize=%s', arraysize)
self._buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
@property
def buffersize(self):
# this is for internal use. it provides an alternate default value for
# the size of the buffer, so that calling ._ensure_buffer_is_filled() will read
# multiple rows into a buffer if arraysize hasn't been set. (otherwise, we'd
# get an unbuffered impl because the PEP 249 default value of arraysize
# is 1)
# Impala's batch size is 1024 and older versions of Impala will not return
# more than 1024 rows in one fetch call. Using a bigger value (same as in
# impala-shell) is useful if result spooling is enabled in Impala.
return self._buffersize if self._buffersize else 10240
@property
def has_result_set(self):
return (self._last_operation is not None and
self._last_operation.has_result_set)
def close(self):
# PEP 249
if self._closed:
return
# If an operation is active and isn't closed before the session is
# closed, then the server will cancel the operation upon closing
# the session. Cancellation could be problematic for some DDL
# operations. This avoids requiring the user to call the non-PEP 249
# close_operation().
exc_info = None
try:
self.close_operation()
except Exception:
exc_info = sys.exc_info()
log.debug('Closing HiveServer2Cursor')
try:
self.session.close()
except Exception:
# If we encountered an error when closing the session
# then print operation close exception to logs and
# raise the session close exception
if exc_info:
log.error('Failure encountered closing last operation.',
exc_info=exc_info)
raise
self._closed = True
# If there was an error when closing last operation then
# raise exception
if exc_info:
six.reraise(*exc_info)
def cancel_operation(self, reset_state=True):
if self._last_operation_active:
log.info('Canceling active operation')
self._last_operation.cancel()
if reset_state:
self._reset_state()
def close_operation(self):
if self._last_operation_active:
log.info('Closing active operation')
self._reset_state()
def _reset_state(self):
log.debug('_reset_state: Resetting cursor state')
self._buffer = Batch()
self._description = None
if self._last_operation_active:
self._last_operation_active = False
self._last_operation.close()
self._last_operation_finished = False
self._last_operation_string = None
self._last_operation = None
def execute(self, operation, parameters=None, configuration=None):
"""Synchronously execute a SQL query.
Blocks until results are available.
Parameters
----------
operation : str
The SQL query to execute.
parameters : str, optional
Parameters to be bound to variables in the SQL query, if any.
Impyla supports all DB API `paramstyle`s, including `qmark`,
`numeric`, `named`, `format`, `pyformat`.
configuration : dict of str keys and values, optional
Configuration overlay for this query.
Returns
-------
NoneType
Results are available through a call to `fetch*`.
"""
# PEP 249
self.execute_async(operation, parameters=parameters,
configuration=configuration)
log.debug('Waiting for query to finish')
self._wait_to_finish() # make execute synchronous
log.debug('Query finished')
def execute_async(self, operation, parameters=None, configuration=None):
"""Asynchronously execute a SQL query.
Immediately returns after query is sent to the HS2 server. Poll with
`is_executing`. A call to `fetch*` will block.
Parameters
----------
operation : str
The SQL query to execute.
parameters : str, optional
Parameters to be bound to variables in the SQL query, if any.
Impyla supports all DB API `paramstyle`s, including `qmark`,
`numeric`, `named`, `format`, `pyformat`.
configuration : dict of str keys and values, optional
Configuration overlay for this query.
Returns
-------
NoneType
Results are available through a call to `fetch*`.
"""
log.debug('Executing query %s', operation)
paramstyle = None
if configuration and 'paramstyle' in configuration:
configuration = configuration.copy()
paramstyle = configuration.pop('paramstyle', None)
def op():
if parameters:
self._last_operation_string = _bind_parameters(operation,
parameters,
paramstyle)
else:
self._last_operation_string = operation
op = self.session.execute(self._last_operation_string,
configuration,
run_async=True)
self._last_operation = op
self._execute_async(op)
def _debug_log_state(self):
if self._last_operation_active:
handle = self._last_operation.handle
else:
handle = None
log.debug('_execute_async: self._buffer=%s self._description=%s '
'self._last_operation_active=%s '
'self._last_operation=%s',
self._buffer, self._description,
self._last_operation_active, handle)
def _execute_async(self, operation_fn):
# operation_fn should set self._last_operation_string and
# self._last_operation
self._debug_log_state()
self._reset_state()
self._debug_log_state()
operation_fn()
self._last_operation_active = True
self._debug_log_state()
def _wait_to_finish(self):
# Prior to IMPALA-1633 GetOperationStatus does not populate errorMessage
# in case of failure. If not populated, queries that return results
# can get a failure description with a further call to FetchResults rpc.
if self._last_operation_finished:
return
loop_start = time.time()
while True:
req = TGetOperationStatusReq(operationHandle=self._last_operation.handle)
resp = self._last_operation._rpc('GetOperationStatus', req, True)
self._last_operation.update_has_result_set(resp)
operation_state = TOperationState._VALUES_TO_NAMES[resp.operationState]
log.debug('_wait_to_finish: waited %s seconds so far',
time.time() - loop_start)
if self._op_state_is_error(operation_state):
if resp.errorMessage:
raise OperationalError(resp.errorMessage)
else:
if self.fetch_error and self.has_result_set:
                        self._last_operation_active = False
self._last_operation.fetch()
else:
raise OperationalError("Operation is in ERROR_STATE")
if not self._op_state_is_executing(operation_state):
self._last_operation_finished = True
break
time.sleep(self._get_sleep_interval(loop_start))
def status(self):
if self._last_operation is None:
raise ProgrammingError("Operation state is not available")
return self._last_operation.get_status()
def execution_failed(self):
if self._last_operation is None:
raise ProgrammingError("Operation state is not available")
operation_state = self._last_operation.get_status()
return self._op_state_is_error(operation_state)
def _op_state_is_error(self, operation_state):
return operation_state == 'ERROR_STATE'
def is_executing(self):
if self._last_operation is None:
raise ProgrammingError("Operation state is not available")
operation_state = self._last_operation.get_status()
return self._op_state_is_executing(operation_state)
def _op_state_is_executing(self, operation_state):
return operation_state in (
'PENDING_STATE', 'INITIALIZED_STATE', 'RUNNING_STATE')
def _get_sleep_interval(self, start_time):
"""Returns a step function of time to sleep in seconds before polling
again. Maximum sleep is 1s, minimum is 0.1s"""
elapsed = time.time() - start_time
if elapsed < 0.05:
return 0.01
elif elapsed < 1.0:
return 0.05
elif elapsed < 10.0:
return 0.1
elif elapsed < 60.0:
return 0.5
return 1.0
def executemany(self, operation, seq_of_parameters, configuration=None):
# PEP 249
log.debug('Attempting to execute %s queries', len(seq_of_parameters))
for parameters in seq_of_parameters:
self.execute(operation, parameters, configuration)
if self.has_result_set:
raise ProgrammingError("Operations that have result sets are "
"not allowed with executemany.")
def fetchone(self):
# PEP 249
self._wait_to_finish()
if not self.has_result_set:
raise ProgrammingError("Tried to fetch but no results.")
log.debug('Fetching a single row')
try:
return next(self)
except StopIteration:
return None
def fetchcbatch(self):
'''Return a CBatch object containing the next rows to be fetched. If data is
currently buffered, returns that data, otherwise fetches the next batch.
Returns None if no more rows are currently available. Note that if None
is returned, more rows may still be available in future.'''
if not self._last_operation.is_columnar:
raise NotSupportedError("Server does not support columnar "
"fetching")
if not self.has_result_set:
raise ProgrammingError(
"Trying to fetch results on an operation with no results.")
if len(self._buffer) > 0:
log.debug('fetchcbatch: buffer has data in. Returning it and wiping buffer')
batch = self._buffer
self._buffer = Batch()
return batch
elif self._last_operation_active:
log.debug('fetchcbatch: buffer empty and op is active => fetching '
'more data')
batch = (self._last_operation.fetch(
self.description,
self.buffersize,
convert_types=self.convert_types))
if len(batch) == 0:
return None
return batch
else:
return None
def fetchmany(self, size=None):
# PEP 249
self._wait_to_finish()
if not self.has_result_set:
raise ProgrammingError("Tried to fetch but no results.")
if size is None:
size = self.arraysize
log.debug('Fetching up to %s result rows', size)
local_buffer = []
while size > 0:
try:
elements = self._pop_from_buffer(size)
local_buffer.extend(elements)
size -= len(elements)
assert size >= 0
except StopIteration:
break
return local_buffer
def fetchall(self):
# PEP 249
self._wait_to_finish()
log.debug('Fetching all result rows')
local_buffer = []
while True:
try:
elements = self._pop_from_buffer(self.buffersize)
local_buffer.extend(elements)
except StopIteration:
break
return local_buffer
def fetchcolumnar(self):
"""Executes a fetchall operation returning a list of CBatches"""
self._wait_to_finish()
if not self._last_operation.is_columnar:
raise NotSupportedError("Server does not support columnar "
"fetching")
batches = []
while True:
batch = (self._last_operation.fetch(
self.description,
self.buffersize,
convert_types=self.convert_types))
if len(batch) == 0:
break
batches.append(batch)
return batches
def setinputsizes(self, sizes):
# PEP 249
pass
def setoutputsize(self, size, column=None):
# PEP 249
pass
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
self._ensure_buffer_is_filled()
log.debug('__next__: popping row out of buffer')
return self._buffer.pop()
def _ensure_buffer_is_filled(self):
while True:
if not self.has_result_set:
raise ProgrammingError(
"Trying to fetch results on an operation with no results.")
if len(self._buffer) > 0:
return
elif self._last_operation_active:
log.debug('_ensure_buffer_is_filled: buffer empty and op is active '
'=> fetching more data')
self._buffer = self._last_operation.fetch(self.description,
self.buffersize,
convert_types=self.convert_types)
if len(self._buffer) > 0:
return
if not self._buffer.expect_more_rows:
log.debug('_ensure_buffer_is_filled: no more data to fetch')
raise StopIteration
# If we didn't get rows, but more are expected, need to iterate again.
else:
log.debug('_ensure_buffer_is_filled: buffer empty')
raise StopIteration
def _pop_from_buffer(self, size):
self._ensure_buffer_is_filled()
log.debug('pop_from_buffer: popping row out of buffer')
return self._buffer.pop_many(size)
def ping(self):
"""Checks connection to server by requesting some info."""
log.info('Pinging the impalad')
return self.session.ping()
def get_log(self):
if self._last_operation is None:
raise ProgrammingError("Operation state is not available")
return self._last_operation.get_log()
def get_profile(self, profile_format=TRuntimeProfileFormat.STRING):
if self._last_operation is None:
raise ProgrammingError("Operation state is not available")
return self._last_operation.get_profile(profile_format=profile_format)
def get_summary(self):
return self._last_operation.get_summary()
def build_summary_table(self, summary, output, idx=0,
is_fragment_root=False, indent_level=0):
return build_summary_table(summary, idx, is_fragment_root,
indent_level, output)
def get_databases(self):
def op():
self._last_operation_string = "RPC_GET_DATABASES"
self._last_operation = self.session.get_databases()
self._execute_async(op)
self._wait_to_finish()
def database_exists(self, db_name):
return self.session.database_exists(db_name)
def get_tables(self, database_name=None):
if database_name is None:
database_name = '.*'
def op():
self._last_operation_string = "RPC_GET_TABLES"
self._last_operation = self.session.get_tables(database_name)
self._execute_async(op)
self._wait_to_finish()
def table_exists(self, table_name, database_name=None):
if database_name is None:
database_name = '.*'
return self.session.table_exists(table_name,
database=database_name)
def get_table_schema(self, table_name, database_name=None):
if database_name is None:
database_name = '.*'
def op():
self._last_operation_string = "RPC_DESCRIBE_TABLE"
self._last_operation = self.session.get_table_schema(
table_name, database_name)
self._execute_async(op)
self._wait_to_finish()
results = self.fetchall()
if len(results) == 0:
# TODO: the error raised here should be different
raise OperationalError(
"no schema results for table %s.%s" % (
database_name, table_name))
# check that results are derived from a unique table
tables = set()
for col in results:
tables.add((col[1], col[2]))
if len(tables) > 1:
# TODO: the error raised here should be different
raise ProgrammingError(
"db: %s, table: %s is not unique" % (
database_name, table_name))
return [(r[3], r[5]) for r in results]
def get_functions(self, database_name=None):
if database_name is None:
database_name = '.*'
def op():
self._last_operation_string = "RPC_GET_FUNCTIONS"
self._last_operation = self.session.get_functions(database_name)
self._execute_async(op)
self._wait_to_finish()
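# --- Illustrative sketch (not part of the original module) ---
# Demonstrates parameter binding, asynchronous execution with polling, and
# chunked fetching on a HiveServer2Cursor. The SQL, table and column names
# are hypothetical placeholders.
def _example_cursor_queries(cursor):
    # Synchronous execution with bound parameters (format paramstyle).
    cursor.execute('SELECT id, name FROM users WHERE id = %s', parameters=(42,))
    row = cursor.fetchone()
    # Asynchronous execution: returns immediately; poll with is_executing().
    cursor.execute_async('SELECT COUNT(*) FROM users')
    while cursor.is_executing():
        time.sleep(0.1)
    counts = cursor.fetchall()
    # Fetch in chunks whose size is controlled by arraysize.
    cursor.arraysize = 100
    cursor.execute('SELECT * FROM users')
    first_chunk = cursor.fetchmany()
    return row, counts, first_chunk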
class HiveServer2DictCursor(HiveServer2Cursor):
"""The cursor that returns each element as a dictionary"""
def execute(self, operation, parameters=None, configuration=None):
super(self.__class__, self).execute(operation, parameters,
configuration)
if self.description is not None:
self.fields = [d[0] for d in self.description]
else:
self.fields = None
def __next__(self):
record = super(self.__class__, self).__next__()
return dict(zip(self.fields, record))
def _pop_from_buffer(self, size):
records = super(self.__class__, self)._pop_from_buffer(size)
return [dict(zip(self.fields, record)) for record in records]
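# --- Illustrative sketch (not part of the original module) ---
# With dictify=True the connection returns a HiveServer2DictCursor, whose rows
# are dicts keyed by column name instead of tuples. Names are hypothetical.
def _example_dict_cursor(connection):
    cursor = connection.cursor(dictify=True)
    cursor.execute('SELECT id, name FROM users LIMIT 5')
    return [row['name'] for row in cursor]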
# This work builds off of:
# 1. the Hue interface:
# hue/apps/beeswax/src/beeswax/server/dbms.py
# hue/apps/beeswax/src/beeswax/server/hive_server2_lib.py
# hue/desktop/core/src/desktop/lib/thrift_util.py
# 2. the Impala shell:
# Impala/shell/original_impala_shell.py
# mapping between the schema types (based on
# com.cloudera.impala.catalog.PrimitiveType) and TColumnValue (in returned
# rows); helper object for converting from TRow to something friendlier
_TTypeId_to_TColumnValue_getters = {
'BOOLEAN': operator.attrgetter('boolVal'),
'TINYINT': operator.attrgetter('byteVal'),
'SMALLINT': operator.attrgetter('i16Val'),
'INT': operator.attrgetter('i32Val'),
'BIGINT': operator.attrgetter('i64Val'),
'TIMESTAMP': operator.attrgetter('stringVal'),
'FLOAT': operator.attrgetter('doubleVal'),
'DOUBLE': operator.attrgetter('doubleVal'),
'STRING': operator.attrgetter('stringVal'),
'DECIMAL': operator.attrgetter('stringVal'),
'BINARY': operator.attrgetter('binaryVal'),
'VARCHAR': operator.attrgetter('stringVal'),
'CHAR': operator.attrgetter('stringVal'),
'MAP': operator.attrgetter('stringVal'),
'ARRAY': operator.attrgetter('stringVal'),
'STRUCT': operator.attrgetter('stringVal'),
'UNIONTYPE': operator.attrgetter('stringVal'),
'NULL': operator.attrgetter('stringVal'),
'DATE': operator.attrgetter('stringVal')
}
_pre_columnar_protocols = [
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V3,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V4,
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V5,
]
def err_if_rpc_not_ok(resp):
if (resp.status.statusCode != TStatusCode.SUCCESS_STATUS and
resp.status.statusCode != TStatusCode.SUCCESS_WITH_INFO_STATUS and
resp.status.statusCode != TStatusCode.STILL_EXECUTING_STATUS):
raise HiveServer2Error(resp.status.errorMessage)
# datetime only supports 6 digits of microseconds but Impala supports 9.
# If present, the trailing 3 digits will be ignored without warning.
_TIMESTAMP_PATTERN = re.compile(r'(\d+-\d+-\d+ \d+:\d+:\d+(\.\d{,6})?)')
# Regex to extract year/month/date from date.
_DATE_PATTERN = re.compile(r'(\d+)-(\d+)-(\d+)')
def _parse_timestamp(value):
input_value = value
if value:
match = _TIMESTAMP_PATTERN.match(value)
if match:
if match.group(2):
format = '%Y-%m-%d %H:%M:%S.%f'
# use the pattern to truncate the value
value = match.group()
else:
format = '%Y-%m-%d %H:%M:%S'
value = datetime.datetime.strptime(value, format)
else:
raise Exception(
'Cannot convert "{}" into a datetime'.format(value))
else:
value = None
log.debug('%s => %s', input_value, value)
return value
def _parse_date(value):
if value:
match = _DATE_PATTERN.match(value)
if match:
return datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3)))
else:
raise Exception(
'Cannot convert "{}" into a date'.format(value))
return value
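# --- Illustrative sketch (not part of the original module) ---
# Expected results of the two parsers above for typical Impala string values;
# note how the nanosecond digits beyond microseconds are truncated.
def _example_parse_values():
    ts = _parse_timestamp('2021-03-04 12:30:45.123456789')
    # -> datetime.datetime(2021, 3, 4, 12, 30, 45, 123456)
    d = _parse_date('2021-03-04')
    # -> datetime.date(2021, 3, 4)
    return ts, d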
# TODO: Add another decorator that runs the function in its own thread
def threaded(func):
# pylint: disable=unused-argument
raise NotImplementedError
def connect(host, port, timeout=None, use_ssl=False, ca_cert=None,
user=None, password=None, kerberos_service_name='impala',
auth_mechanism=None, krb_host=None, use_http_transport=False,
http_path='', http_cookie_names=None, retries=3, jwt=None,
user_agent=None):
log.debug('Connecting to HiveServer2 %s:%s with %s authentication '
'mechanism', host, port, auth_mechanism)
if krb_host:
kerberos_host = krb_host
else:
kerberos_host = host
if use_http_transport:
# TODO(#362): Add server authentication with thrift 0.12.
if ca_cert:
raise NotSupportedError("Server authentication is not supported " +
"with HTTP endpoints")
transport = get_http_transport(host, port, http_path=http_path,
use_ssl=use_ssl, ca_cert=ca_cert,
auth_mechanism=auth_mechanism,
user=user, password=password,
kerberos_host=kerberos_host,
kerberos_service_name=kerberos_service_name,
http_cookie_names=http_cookie_names,
jwt=jwt, user_agent=user_agent)
else:
sock = get_socket(host, port, use_ssl, ca_cert)
if timeout is not None:
timeout = timeout * 1000. # TSocket expects millis
sock.setTimeout(timeout)
log.debug('sock=%s', sock)
transport = get_transport(sock, kerberos_host, kerberos_service_name,
auth_mechanism, user, password)
transport.open()
protocol = TBinaryProtocolAccelerated(transport)
service = ThriftClient(protocol)
log.debug('transport=%s protocol=%s service=%s', transport, protocol,
service)
return HS2Service(service, retries=retries)
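# --- Illustrative sketch (not part of the original module) ---
# Two typical ways to open an HS2 service with connect() and wrap it in a
# HiveServer2Connection. Host names and credentials are hypothetical.
def _example_connect():
    # Plain (insecure) connection to an impalad's HiveServer2 port.
    plain = connect('impala-host.example.com', 21050, timeout=30)
    # LDAP username/password authentication over TLS.
    secure = connect('impala-host.example.com', 21050, use_ssl=True,
                     auth_mechanism='LDAP', user='alice', password='secret')
    return HiveServer2Connection(plain), HiveServer2Connection(secure)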
def _is_columnar_protocol(hs2_protocol_version):
return (hs2_protocol_version ==
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6)
def _is_precolumnar_protocol(hs2_protocol_version):
return hs2_protocol_version in _pre_columnar_protocols
class Batch(object):
def __init__(self):
pass
def __len__(self):
return 0
def pop(self):
raise NotImplementedError("Cannot pop a Batch object")
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
if len(self) > 0:
return self.pop()
raise StopIteration
def __str__(self):
return 'Batch()'
class Column(object):
def __init__(self, data_type, values, nulls):
self.data_type = data_type
self.values = values
self.nulls = nulls
self.rows_left = len(self.values)
self.num_rows = self.rows_left
def __len__(self):
return self.rows_left
def __str__(self):
return 'Column(type={0}, values={1}, nulls={2})'.format(
self.data_type, self.values, self.nulls)
def pop(self):
if self.rows_left < 1:
raise StopIteration
pos = self.num_rows-self.rows_left
self.rows_left -= 1
if self.nulls[pos]:
return None
value = self.values[pos]
return value
def pop_to_preallocated_list(self, output_list, count, offset=0, stride=1):
""" Tries to pop 'count' values and write them to every 'stride'th element of
'output_list' starting with 'offset'.
Returns the number of values popped.
"""
count = min(count, self.rows_left)
start_pos = self.num_rows - self.rows_left
self.rows_left -= count
for pos in xrange(start_pos, start_pos + count):
output_list[offset] = None if self.nulls[pos] else self.values[pos]
offset += stride
return count
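# --- Illustrative sketch (not part of the original module) ---
# Demonstrates Column.pop() and pop_to_preallocated_list() with plain Python
# lists standing in for the thrift-provided value/null arrays.
def _example_column_usage():
    col = Column('INT', [10, 20, 30], [False, True, False])
    first = col.pop()                 # 10
    out = [None] * 4
    popped = col.pop_to_preallocated_list(out, count=2, offset=0, stride=2)
    return first, popped, out         # (10, 2, [None, None, 30, None])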
class CBatch(Batch):
def __init__(self, trowset, expect_more_rows, schema, convert_types=True):
self.expect_more_rows = expect_more_rows
self.schema = schema
tcols = [_TTypeId_to_TColumnValue_getters[schema[i][1]](col)
for (i, col) in enumerate(trowset.columns)]
num_cols = len(tcols)
num_rows = len(tcols[0].values)
self.remaining_rows = num_rows
log.debug('CBatch: input TRowSet num_cols=%s num_rows=%s tcols=%s',
num_cols, num_rows, tcols)
self.columns = []
for j in range(num_cols):
type_ = schema[j][1]
nulls = tcols[j].nulls
values = tcols[j].values
is_null = bitarray(endian='little')
is_null.frombytes(nulls)
# Ref HUE-2722, HiveServer2 sometimes does not add trailing '\x00'
if len(values) > len(nulls):
to_append = ((len(values) - len(nulls) + 7) // 8)
is_null.frombytes(b'\x00' * to_append)
# STRING columns are read as binary and decoded here to be able to handle
# non-valid utf-8 strings in Python 3.
if six.PY3:
self._convert_strings_to_unicode(type_, is_null, values)
if convert_types:
values = self._convert_values(type_, is_null, values)
self.columns.append(Column(type_, values, is_null))
def _convert_values(self, type_, is_null, values):
# pylint: disable=consider-using-enumerate
if type_ == 'TIMESTAMP':
for i in range(len(values)):
values[i] = (None if is_null[i] else
_parse_timestamp(values[i]))
elif type_ == 'DECIMAL':
for i in range(len(values)):
values[i] = (None if is_null[i] else Decimal(values[i]))
elif type_ == 'DATE':
for i in range(len(values)):
values[i] = (None if is_null[i] else _parse_date(values[i]))
return values
def _convert_strings_to_unicode(self, type_, is_null, values):
if type_ in ["STRING", "LIST", "MAP", "STRUCT", "UNIONTYPE", "DECIMAL", "DATE", "TIMESTAMP", "NULL", "VARCHAR", "CHAR"]:
for i in range(len(values)):
if is_null[i]:
values[i] = None
continue
try:
# Do similar handling of non-valid UTF-8 strings as Thriftpy2:
# https://github.com/Thriftpy/thriftpy2/blob/8e218b3fd89c597c2e83d129efecfe4d280bdd89/thriftpy2/protocol/binary.py#L241
# If decoding fails then keep the original bytearray.
values[i] = values[i].decode("UTF-8")
except UnicodeDecodeError:
pass
def __len__(self):
return self.remaining_rows
def pop(self):
self.remaining_rows -= 1
return tuple([c.pop() for c in self.columns])
def __str__(self):
col_string = ','.join([str(col) for col in self.columns])
return 'CBatch({0})'.format(col_string)
def pop_many(self, row_count):
"""Returns a list of tuples with min('row_count', rows in batch) elements."""
row_count = min(row_count, self.remaining_rows)
self.remaining_rows -= row_count
col_count = len(self.columns)
# 'dataset' holds all rows x columns in list in row major order.
# The transposition of columnar data is done by writing 'dataset' per-column
# and then returning it per-row.
dataset = [None] * (col_count * row_count)
for col_id, col in enumerate(self.columns):
rows_returned = col.pop_to_preallocated_list(
dataset, row_count, offset=col_id, stride=col_count)
assert row_count == rows_returned
# Split 'dataset' to 'col_count' sized sublists and create tuples from them.
return [tuple(dataset[i * col_count: (i + 1) * col_count])
for i in xrange(row_count)]
class RBatch(Batch):
def __init__(self, trowset, expect_more_rows, schema):
log.debug('RBatch: input TRowSet: %s', trowset)
self.expect_more_rows = expect_more_rows
self.schema = schema
self.rows = []
for trow in trowset.rows:
row = []
for (i, col_val) in enumerate(trow.colVals):
type_ = schema[i][1]
value = _TTypeId_to_TColumnValue_getters[type_](col_val).value
if type_ == 'TIMESTAMP':
value = _parse_timestamp(value)
elif type_ == 'DECIMAL':
if value:
value = Decimal(value)
row.append(value)
self.rows.append(tuple(row))
def __len__(self):
return len(self.rows)
def pop(self):
# TODO: this looks extremely inefficient
return self.rows.pop(0)
def pop_many(self, row_count):
row_count = min(row_count, len(self.rows))
result = self.rows[:row_count]
self.rows = self.rows[row_count:]
return result
class ThriftRPC(object):
def __init__(self, client, retries=3):
self.client = client
self.retries = retries
def _rpc(self, func_name, request, retry_on_http_error=False):
self._log_request(func_name, request)
response = self._execute(func_name, request, retry_on_http_error)
self._log_response(func_name, response)
err_if_rpc_not_ok(response)
return response
def _execute(self, func_name, request, retry_on_http_error=False):
# pylint: disable=protected-access
# get the thrift transport
transport = self.client._iprot.trans
tries_left = self.retries
last_http_exception = None
while tries_left > 0:
try:
log.debug('Attempting to open transport (tries_left=%s)',
tries_left)
open_transport(transport)
log.debug('Transport opened')
func = getattr(self.client, func_name)
return func(request)
except socket.error:
log.exception('Failed to open transport (tries_left=%s)',
tries_left)
last_http_exception = None
except TTransportException:
log.exception('Failed to open transport (tries_left=%s)',
tries_left)
last_http_exception = None
except HttpError as h:
if not retry_on_http_error:
log.debug('Caught HttpError %s %s in %s which is not retryable',
h, str(h.body or ''), func_name)
raise
last_http_exception = h
if tries_left > 1:
retry_secs = None
retry_after = h.http_headers.get('Retry-After', None)
if retry_after:
try:
retry_secs = int(retry_after)
except ValueError:
retry_secs = None
if retry_secs:
log.debug("sleeping after seeing Retry-After value of %d", retry_secs)
log.debug('Caught HttpError %s %s in %s (tries_left=%s), retry after %d secs',
h, str(h.body or ''), func_name, tries_left, retry_secs)
time.sleep(retry_secs)
else:
retry_secs = 1 # Future: use exponential backoff?
log.debug("sleeping for %d second before retrying", retry_secs)
time.sleep(retry_secs)
log.debug('Caught HttpError %s %s in %s (tries_left=%s)',
h, str(h.body or ''), func_name, tries_left)
except Exception:
raise
log.debug('Closing transport (tries_left=%s)', tries_left)
transport.close()
tries_left -= 1
if last_http_exception is not None:
raise last_http_exception
raise HiveServer2Error('Failed after retrying {0} times'
.format(self.retries))
def _operation(self, kind, request, retry_on_http_error=False):
resp = self._rpc(kind, request, retry_on_http_error)
return self._get_operation(resp.operationHandle)
def _log_request(self, kind, request):
log.debug('%s: req=%s', kind, request)
def _log_response(self, kind, response):
log.debug('%s: resp=%s', kind, response)
def open_transport(transport):
"""
Open transport if needed.
"""
if not transport.isOpen():
transport.open()
class HS2Service(ThriftRPC):
def __init__(self, thrift_client, retries=3):
ThriftRPC.__init__(self, thrift_client, retries=retries)
def close(self):
# pylint: disable=protected-access
log.debug('close_service: client=%s', self.client)
self.client._iprot.trans.close()
def reconnect(self):
# pylint: disable=protected-access
log.debug('reconnect: client=%s', self.client)
self.client._iprot.trans.close()
self.client._iprot.trans.open()
def open_session(self, user, configuration=None):
protocol = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6
req = TOpenSessionReq(client_protocol=protocol,
username=user,
configuration=configuration)
# OpenSession rpcs are idempotent and so ok to retry. If the client gets
# disconnected and the server successfully opened a session, the client
# will retry and rely on server to clean up the session.
resp = self._rpc('OpenSession', req, True)
return HS2Session(self, resp.sessionHandle,
resp.configuration,
resp.serverProtocolVersion)
class HS2Session(ThriftRPC):
def __init__(self, service, handle, config, hs2_protocol_version,
retries=3):
# pylint: disable=protected-access
self.service = service
self.handle = handle
self.config = config
self.hs2_protocol_version = hs2_protocol_version
if hs2_protocol_version not in TProtocolVersion._VALUES_TO_NAMES:
raise HiveServer2Error("Got HiveServer2 version {0}; "
"expected V1 - V6"
.format(hs2_protocol_version))
ThriftRPC.__init__(self, self.service.client, retries=retries)
def close(self):
req = TCloseSessionReq(sessionHandle=self.handle)
# CloseSession rpcs don't retry as a session cannot be closed twice.
self._rpc('CloseSession', req, False)
def execute(self, statement, configuration=None, run_async=False):
req = TExecuteStatementReq(sessionHandle=self.handle,
statement=statement,
confOverlay=configuration,
runAsync=run_async)
# Do not try to retry http requests.
# Read queries should be idempotent but most dml queries are not. Also retrying
        # query execution from the client could be expensive, so it likely makes sense
        # only if the server is also aware of the retries.
return self._operation('ExecuteStatement', req, False)
def get_databases(self, schema='.*'):
req = TGetSchemasReq(sessionHandle=self.handle, schemaName=schema)
return self._operation('GetSchemas', req, True)
def get_tables(self, database='.*', table_like='.*'):
req = TGetTablesReq(sessionHandle=self.handle,
schemaName=database,
tableName=table_like)
return self._operation('GetTables', req, True)
def get_table_schema(self, table, database='.*'):
req = TGetColumnsReq(sessionHandle=self.handle,
schemaName=database,
tableName=table, columnName='.*')
return self._operation('GetColumns', req, True)
def get_functions(self, database='.*'):
# TODO: need to test this one especially
req = TGetFunctionsReq(sessionHandle=self.handle,
schemaName=database,
functionName='.*')
return self._operation('GetFunctions', req, True)
def database_exists(self, db_name):
op = self.get_databases(schema=db_name)
# this only fetches default max_rows, but there should only be one row
# ideally
results = op.fetch()
exists = False
for result in results:
if result[0].lower() == db_name.lower():
exists = True
op.close()
return exists
def table_exists(self, table, database='.*'):
op = self.get_tables(database=database, table_like=table)
results = op.fetch()
exists = False
for result in results:
if result[2].lower() == table.lower():
exists = True
op.close()
return exists
def ping(self):
req = TGetInfoReq(sessionHandle=self.handle,
infoType=TGetInfoType.CLI_SERVER_NAME)
log.debug('ping: req=%s', req)
try:
resp = self.client.GetInfo(req)
except TTransportException:
log.exception('ping: failed')
return False
log.debug('ping: resp=%s', resp)
try:
err_if_rpc_not_ok(resp)
except HiveServer2Error:
log.exception('ping: failed')
return False
return True
def _get_operation(self, handle):
return Operation(self, handle)
class Operation(ThriftRPC):
def __init__(self, session, handle, retries=3):
self.session = session
self.handle = handle
self._schema = None
self._state_has_result_set = None
ThriftRPC.__init__(self, self.session.client, retries=retries)
@property
def has_result_set(self):
# When HIVE_CLI_SERVICE_PROTOCOL_V10 or later API is used and async compilation is
# enabled, self.handle.hasResultSet is not set any longer.
# In this case self._state_has_result_set should be used instead.
if self._state_has_result_set is not None:
return self._state_has_result_set
else:
return self.handle.hasResultSet
def update_has_result_set(self, state):
self._state_has_result_set = state.hasResultSet
def get_status(self):
# pylint: disable=protected-access
req = TGetOperationStatusReq(operationHandle=self.handle)
# GetOperationStatus rpc is idempotent and so safe to retry.
resp = self._rpc('GetOperationStatus', req, True)
self.update_has_result_set(resp)
return TOperationState._VALUES_TO_NAMES[resp.operationState]
def get_state(self):
req = TGetOperationStatusReq(operationHandle=self.handle)
# GetOperationStatus rpc is idempotent and so safe to retry.
resp = self._rpc('GetOperationStatus', req, True)
self.update_has_result_set(resp)
return resp
def get_log(self, max_rows=1024, orientation=TFetchOrientation.FETCH_NEXT):
try:
req = TGetLogReq(operationHandle=self.handle)
# GetLog rpc is idempotent and so safe to retry.
log = self._rpc('GetLog', req, True).log
except TApplicationException as e: # raised if Hive is used
if not e.type == TApplicationException.UNKNOWN_METHOD:
raise
req = TFetchResultsReq(operationHandle=self.handle,
orientation=orientation,
maxRows=max_rows,
fetchType=1)
resp = self._rpc('FetchResults', req, False)
schema = [('Log', 'STRING', None, None, None, None, None)]
log = self._wrap_results(resp.results, resp.hasMoreRows, schema,
convert_types=True)
log = '\n'.join(l[0] for l in log)
return log
def cancel(self):
req = TCancelOperationReq(operationHandle=self.handle)
# CancelOperation rpc is idempotent and so safe to retry.
return self._rpc('CancelOperation', req, True)
def close(self):
req = TCloseOperationReq(operationHandle=self.handle)
# CloseOperation rpc is not idempotent for dml and we're not sure
# here if this is dml or not.
return self._rpc('CloseOperation', req, False)
def get_profile(self, profile_format=TRuntimeProfileFormat.STRING):
req = TGetRuntimeProfileReq(operationHandle=self.handle,
sessionHandle=self.session.handle,
format=profile_format)
# GetRuntimeProfile rpc is idempotent and so safe to retry.
resp = self._rpc('GetRuntimeProfile', req, True)
if profile_format == TRuntimeProfileFormat.THRIFT:
return resp.thrift_profile
return resp.profile
def get_summary(self):
req = TGetExecSummaryReq(operationHandle=self.handle,
sessionHandle=self.session.handle)
# GetExecSummary rpc is idempotent and so safe to retry.
resp = self._rpc('GetExecSummary', req, True)
return resp.summary
def fetch(self, schema=None, max_rows=1024,
orientation=TFetchOrientation.FETCH_NEXT,
convert_types=True):
if not self.has_result_set:
log.debug('fetch_results: has_result_set=False')
return None
# the schema is necessary to pull the proper values (i.e., coalesce)
if schema is None:
schema = self.get_result_schema()
req = TFetchResultsReq(operationHandle=self.handle,
orientation=orientation,
maxRows=max_rows)
# FetchResults rpc is not idempotent unless the client and server communicate and
# results are kept around for retry to be successful.
resp = self._rpc('FetchResults', req, False)
return self._wrap_results(resp.results, resp.hasMoreRows, schema,
convert_types=convert_types)
def _wrap_results(self, results, expect_more_rows, schema, convert_types=True):
if self.is_columnar:
log.debug('fetch_results: constructing CBatch')
return CBatch(results, expect_more_rows, schema, convert_types=convert_types)
else:
log.debug('fetch_results: constructing RBatch')
# TODO: RBatch ignores 'convert_types'
return RBatch(results, expect_more_rows, schema)
@property
def is_columnar(self):
protocol = self.session.hs2_protocol_version
return _is_columnar_protocol(protocol)
def get_result_schema(self):
if not self.has_result_set:
log.debug('get_result_schema: has_result_set=False')
return None
req = TGetResultSetMetadataReq(operationHandle=self.handle)
resp = self._rpc('GetResultSetMetadata', req, True)
schema = []
for column in resp.schema.columns:
# pylint: disable=protected-access
name = column.columnName
entry = column.typeDesc.types[0].primitiveEntry
type_ = TTypeId._VALUES_TO_NAMES[entry.type].split('_')[0]
if type_ == 'DECIMAL':
qualifiers = entry.typeQualifiers.qualifiers
precision = qualifiers['precision'].i32Value
scale = qualifiers['scale'].i32Value
schema.append((name, type_, None, None,
precision, scale, None))
else:
schema.append((name, type_, None, None, None, None, None))
log.debug('get_result_schema: schema=%s', schema)
return schema
def build_summary_table(summary, idx, is_fragment_root, indent_level, output):
"""Direct translation of Coordinator::PrintExecSummary() to recursively
build a list of rows of summary statistics, one per exec node
summary: the TExecSummary object that contains all the summary data
idx: the index of the node to print
is_fragment_root: true if the node to print is the root of a fragment (and
therefore feeds into an exchange)
indent_level: the number of spaces to print before writing the node's
label, to give the appearance of a tree. The 0th child of a node has the
same indent_level as its parent. All other children have an indent_level
of one greater than their parent.
output: the list of rows into which to append the rows produced for this
node and its children.
Returns the index of the next exec node in summary.exec_nodes that should
be processed, used internally to this method only.
"""
# pylint: disable=too-many-locals
attrs = ["latency_ns", "cpu_time_ns", "cardinality", "memory_used"]
# Initialise aggregate and maximum stats
agg_stats, max_stats = TExecStats(), TExecStats()
for attr in attrs:
setattr(agg_stats, attr, 0)
setattr(max_stats, attr, 0)
node = summary.nodes[idx]
for stats in node.exec_stats:
for attr in attrs:
val = getattr(stats, attr)
if val is not None:
setattr(agg_stats, attr, getattr(agg_stats, attr) + val)
setattr(max_stats, attr, max(getattr(max_stats, attr), val))
if len(node.exec_stats) > 0:
avg_time = agg_stats.latency_ns / len(node.exec_stats)
else:
avg_time = 0
# If the node is a broadcast-receiving exchange node, the cardinality of
# rows produced is the max over all instances (which should all have
# received the same number of rows). Otherwise, the cardinality is the sum
# over all instances which process disjoint partitions.
if node.is_broadcast and is_fragment_root:
cardinality = max_stats.cardinality
else:
cardinality = agg_stats.cardinality
est_stats = node.estimated_stats
label_prefix = ""
if indent_level > 0:
label_prefix = "|"
if is_fragment_root:
label_prefix += " " * indent_level
else:
label_prefix += "--" * indent_level
    def prettyprint(val, units, divisor):
        for unit in units:
            # Return at the matching unit; fall back to the largest unit for very large
            # values instead of falling off the loop and returning None.
            if val < divisor or unit == units[-1]:
                if unit == units[0]:
                    return "%d%s" % (val, unit)
                else:
                    return "%3.2f%s" % (val, unit)
            val /= divisor
def prettyprint_bytes(byte_val):
return prettyprint(
byte_val, [' B', ' KB', ' MB', ' GB', ' TB'], 1024.0)
def prettyprint_units(unit_val):
return prettyprint(unit_val, ["", "K", "M", "B"], 1000.0)
def prettyprint_time(time_val):
return prettyprint(time_val, ["ns", "us", "ms", "s"], 1000.0)
row = [label_prefix + node.label,
len(node.exec_stats),
prettyprint_time(avg_time),
prettyprint_time(max_stats.latency_ns),
prettyprint_units(cardinality),
prettyprint_units(est_stats.cardinality),
prettyprint_bytes(max_stats.memory_used),
prettyprint_bytes(est_stats.memory_used),
node.label_detail]
output.append(row)
try:
sender_idx = summary.exch_to_sender_map[idx]
# This is an exchange node, so the sender is a fragment root, and
# should be printed next.
build_summary_table(summary, sender_idx, True, indent_level, output)
except (KeyError, TypeError):
# Fall through if idx not in map, or if exch_to_sender_map itself is
# not set
pass
idx += 1
if node.num_children > 0:
first_child_output = []
idx = build_summary_table(summary, idx, False, indent_level,
first_child_output)
# pylint: disable=unused-variable
# TODO: is child_idx supposed to be unused? See #120
for child_idx in range(1, node.num_children):
# All other children are indented (we only have 0, 1 or 2 children
# for every exec node at the moment)
idx = build_summary_table(summary, idx, False, indent_level + 1,
output)
output += first_child_output
return idx
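# Illustrative usage sketch for build_summary_table() (not part of the original module).
# It assumes `op` is an Operation whose query has already finished executing; the helper
# flattens the Thrift exec summary into printable rows.
#
#     summary = op.get_summary()
#     rows = []
#     build_summary_table(summary, 0, is_fragment_root=False, indent_level=0, output=rows)
#     for row in rows:
#         print(row)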
|
c20a33bd6a4369df47c5fe72b85c29886537f3e1
|
95069eaed0c9abe54bb20019511c39053a966236
|
/audio/hparams_audio.py
|
c27408f2bd4baddfff544f56a7c57be2d027ce5f
|
[
"MIT"
] |
permissive
|
xcmyz/FastSpeech
|
722546ff71d717a537cb1782462db457a3d14998
|
1e1a61b5015c951caa551b7fab4080339d697c7c
|
refs/heads/master
| 2023-07-23T03:28:19.696457
| 2022-09-16T06:38:58
| 2022-09-16T06:38:58
| 189,590,197
| 884
| 230
|
MIT
| 2023-07-06T22:00:14
| 2019-05-31T12:27:47
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
hparams_audio.py
|
max_wav_value = 32768.0
sampling_rate = 22050
filter_length = 1024
hop_length = 256
win_length = 1024
n_mel_channels = 80
mel_fmin = 0.0
mel_fmax = 8000.0
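# Illustrative helper (not part of the original hyperparameter file) showing how
# `sampling_rate` and `hop_length` relate: a clip of N samples yields roughly
# N // hop_length (+1) mel-spectrogram frames, i.e. about 86 frames per second here.
def approx_num_mel_frames(num_samples):
    """Approximate number of mel frames produced for `num_samples` audio samples."""
    return 1 + num_samples // hop_length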
|
a3fd6cdcaea4fafe6f022a7f0e6b6238209dcdfc
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/reverse-linked-list.py
|
27ea387334fbefcad96b2d2526f4ee98ad50d30d
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
reverse-linked-list.py
|
# Time: O(n)
# Space: O(1)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
if self:
return "{} -> {}".format(self.val, repr(self.next))
# Iterative solution.
class Solution(object):
# @param {ListNode} head
# @return {ListNode}
def reverseList(self, head):
dummy = ListNode(float("-inf"))
while head:
dummy.next, head.next, head = head, dummy.next, head.next
return dummy.next
# Time: O(n)
# Space: O(n)
# Recursive solution.
class Solution2(object):
# @param {ListNode} head
# @return {ListNode}
def reverseList(self, head):
[begin, end] = self.reverseListRecu(head)
return begin
def reverseListRecu(self, head):
if not head:
return [None, None]
[begin, end] = self.reverseListRecu(head.next)
if end:
end.next = head
head.next = None
return [begin, head]
else:
return [head, head]
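# Small usage sketch (not part of the original solution file).
if __name__ == "__main__":
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    print(Solution().reverseList(head))          # 3 -> 2 -> 1 -> None
    print(Solution2().reverseList(ListNode(4)))  # 4 -> None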
|
c45c88b36e8e5dec73523b397e1c656e6ccfb283
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
15c8400240fd9029ae34fca077304337d9c75ca6
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
Util.py
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
    if key not in dict:
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
aa9b786a6c9664d396067fbc0bd7e7933a926ec9
|
7bea5adf7d6284fbad0131d665e957d58adfe7c7
|
/allauth/socialaccount/providers/openid_connect/provider.py
|
03be12beda774e3140352a5eaf178366f4f433a3
|
[
"MIT"
] |
permissive
|
pennersr/django-allauth
|
50c9e71c3666785368e92ed9e19ea0f6a5438cd2
|
6b8911a5ebbabda0d446f2743bd4d00d250ed500
|
refs/heads/main
| 2023-09-03T16:48:10.988418
| 2023-09-02T08:00:53
| 2023-09-02T08:00:53
| 976,994
| 7,719
| 3,481
|
MIT
| 2023-09-14T15:06:57
| 2010-10-10T20:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,422
|
py
|
provider.py
|
from django.urls import reverse
from django.utils.http import urlencode
from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class OpenIDConnectProviderAccount(ProviderAccount):
def to_str(self):
dflt = super(OpenIDConnectProviderAccount, self).to_str()
return self.account.extra_data.get("name", dflt)
class OpenIDConnectProvider(OAuth2Provider):
id = "openid_connect"
name = "OpenID Connect"
account_class = OpenIDConnectProviderAccount
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = self.app.name
@property
def server_url(self):
url = self.app.settings["server_url"]
return self.wk_server_url(url)
def wk_server_url(self, url):
well_known_uri = "/.well-known/openid-configuration"
if not url.endswith(well_known_uri):
url += well_known_uri
return url
def get_login_url(self, request, **kwargs):
url = reverse(
self.app.provider + "_login", kwargs={"provider_id": self.app.provider_id}
)
if kwargs:
url = url + "?" + urlencode(kwargs)
return url
def get_callback_url(self):
return reverse(
self.app.provider + "_callback",
kwargs={"provider_id": self.app.provider_id},
)
@property
def token_auth_method(self):
return self.app.settings.get("token_auth_method")
def get_default_scope(self):
return ["openid", "profile", "email"]
def extract_uid(self, data):
return str(data["sub"])
def extract_common_fields(self, data):
return dict(
email=data.get("email"),
username=data.get("preferred_username"),
name=data.get("name"),
user_id=data.get("user_id"),
picture=data.get("picture"),
)
def extract_email_addresses(self, data):
addresses = []
email = data.get("email")
if email:
addresses.append(
EmailAddress(
email=email,
verified=data.get("email_verified", False),
primary=True,
)
)
return addresses
provider_classes = [OpenIDConnectProvider]
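# Illustrative settings sketch (an assumption, not taken from this module) of the per-app
# keys the provider above reads: `name`, `provider_id`, and `settings["server_url"]`
# (resolved by wk_server_url() against /.well-known/openid-configuration), plus the
# optional `token_auth_method`. The identifiers below are hypothetical.
#
# SOCIALACCOUNT_PROVIDERS = {
#     "openid_connect": {
#         "APPS": [
#             {
#                 "provider_id": "my-idp",
#                 "name": "My Identity Provider",
#                 "client_id": "...",
#                 "secret": "...",
#                 "settings": {"server_url": "https://idp.example.com"},
#             },
#         ]
#     }
# }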
|
5ff43aa2e1fc78574c6c25e6b4115893f9d413c5
|
6186a3787d1e74f1866844491da48b9643c8f1a9
|
/ghostwriter/rolodex/models.py
|
00992839dba64c8b090c9dc32be60556383b672b
|
[
"BSD-3-Clause"
] |
permissive
|
GhostManager/Ghostwriter
|
b46b2421e5737ed0afbf49182dce9eeb5eb31936
|
b9eae4459ba192fbb2d4a5b66f8210d57fd7112a
|
refs/heads/master
| 2023-09-04T02:34:54.085997
| 2023-07-13T22:38:44
| 2023-07-13T22:38:44
| 197,269,443
| 1,011
| 197
|
BSD-3-Clause
| 2023-09-08T00:19:52
| 2019-07-16T21:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 25,726
|
py
|
models.py
|
"""This contains all the database models used by the Rolodex application."""
# Standard Libraries
from datetime import time, timedelta
# Django Imports
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
# 3rd Party Libraries
from taggit.managers import TaggableManager
from timezone_field import TimeZoneField
# Ghostwriter Libraries
from ghostwriter.oplog.models import OplogEntry
from ghostwriter.rolodex.validators import validate_ip_range
from ghostwriter.reporting.models import ReportFindingLink
User = get_user_model()
class Client(models.Model):
"""
Stores an individual client.
"""
name = models.CharField(
"Client Name",
max_length=255,
unique=True,
help_text="Provide the client's full name as you want it to appear in a report",
)
short_name = models.CharField(
"Client Short Name",
max_length=255,
null=True,
blank=True,
help_text="Provide an abbreviated name to be used in reports",
)
codename = models.CharField(
"Client Codename",
max_length=255,
null=True,
blank=True,
help_text="Give the client a codename (might be a ticket number, CMS reference, or something else)",
)
note = models.TextField(
"Client Note",
null=True,
blank=True,
help_text="Describe the client or provide some additional information",
)
timezone = TimeZoneField(
"Client Timezone",
default="America/Los_Angeles",
help_text="Primary timezone of the client",
)
address = models.TextField(
"Client Business Address",
null=True,
blank=True,
help_text="An address to be used for reports or shipping",
)
tags = TaggableManager(blank=True)
class Meta:
ordering = ["name"]
verbose_name = "Client"
verbose_name_plural = "Clients"
def get_absolute_url(self):
return reverse("rolodex:client_detail", args=[str(self.id)])
def __str__(self):
return self.name
class ClientContact(models.Model):
"""
Stores an individual point of contact, related to :model:`rolodex.Client`.
"""
name = models.CharField("Name", help_text="Enter the contact's full name", max_length=255, null=True)
job_title = models.CharField(
"Title or Role",
max_length=255,
null=True,
blank=True,
help_text="Enter the contact's job title or project role as you want it to appear in a report",
)
email = models.CharField(
"Email",
max_length=255,
null=True,
blank=True,
help_text="Enter an email address for this contact",
)
# The ITU E.164 states phone numbers should not exceed 15 characters
# We want valid phone numbers, but validating them (here or in forms) is unnecessary
# Numbers are not used for anything – and any future use would involve human involvement
# The `max_length` allows for people adding spaces, other chars, and extension numbers
phone = models.CharField(
"Phone",
max_length=50,
null=True,
blank=True,
help_text="Enter a phone number for this contact",
)
timezone = TimeZoneField(
"Timezone",
default="America/Los_Angeles",
help_text="The contact's timezone",
)
note = models.TextField(
"Client Note",
null=True,
blank=True,
help_text="Provide additional information about the contact",
)
# Foreign keys
client = models.ForeignKey(Client, on_delete=models.CASCADE, null=False, blank=False)
class Meta:
ordering = ["client", "id"]
verbose_name = "Client POC"
verbose_name_plural = "Client POCs"
def __str__(self):
return f"{self.name} ({self.client})"
class ProjectType(models.Model):
"""
Stores an individual project type, related to :model:`rolodex.Project`.
"""
project_type = models.CharField(
"Project Type",
max_length=255,
unique=True,
help_text="Enter a project type (e.g. red team, penetration test)",
)
class Meta:
ordering = ["project_type"]
verbose_name = "Project type"
verbose_name_plural = "Project types"
def __str__(self):
return self.project_type
class Project(models.Model):
"""
Stores an individual project, related to :model:`rolodex.Client`,
:model:`rolodex.ProjectType`, and :model:`users.User`.
"""
codename = models.CharField(
"Project Codename",
max_length=255,
null=True,
blank=True,
help_text="Give the project a codename (might be a ticket number, PMO reference, or something else)",
)
start_date = models.DateField("Start Date", max_length=12, help_text="Enter the start date of this project")
end_date = models.DateField("End Date", max_length=12, help_text="Enter the end date of this project")
note = models.TextField(
"Notes",
null=True,
blank=True,
help_text="Provide additional information about the project and planning",
)
slack_channel = models.CharField(
"Project Slack Channel",
max_length=255,
null=True,
blank=True,
help_text="Provide an Slack channel to be used for project notifications",
)
complete = models.BooleanField("Completed", default=False, help_text="Mark this project as complete")
timezone = TimeZoneField(
"Project Timezone",
default="America/Los_Angeles",
help_text="Timezone of the project / working hours",
)
start_time = models.TimeField(
"Start Time",
default=time(9, 00),
null=True,
blank=True,
help_text="Select the start time for each day",
)
end_time = models.TimeField(
"End Time",
default=time(17, 00),
null=True,
blank=True,
help_text="Select the end time for each day",
)
tags = TaggableManager(blank=True)
# Foreign keys
client = models.ForeignKey(
"Client",
on_delete=models.CASCADE,
null=False,
help_text="Select the client to which this project should be attached",
)
operator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True)
project_type = models.ForeignKey(
"ProjectType",
on_delete=models.PROTECT,
null=False,
help_text="Select a category for this project that best describes the work being performed",
)
def count_findings(self):
"""
Count and return the number of findings across all reports associated with
an individual :model:`rolodex.Project`.
"""
finding_queryset = ReportFindingLink.objects.select_related("report", "report__project").filter(
report__project=self.pk
)
return finding_queryset.count()
count = property(count_findings)
class Meta:
ordering = ["-start_date", "end_date", "client", "project_type"]
verbose_name = "Project"
verbose_name_plural = "Projects"
def get_absolute_url(self):
return reverse("rolodex:project_detail", args=[str(self.id)])
def __str__(self):
return f"{self.start_date} {self.client} {self.project_type} ({self.codename})"
class ProjectRole(models.Model):
"""
Stores an individual project role.
"""
project_role = models.CharField(
"Project Role",
max_length=255,
unique=True,
help_text="Enter an operator role used for project assignments",
)
class Meta:
ordering = ["project_role"]
verbose_name = "Project role"
verbose_name_plural = "Project roles"
def __str__(self):
return self.project_role
class ProjectAssignment(models.Model):
"""
Stores an individual project assignment, related to :model:`users.User`,
:model:`rolodex.Project`, and :model:`rolodex.ProjectRole`.
"""
start_date = models.DateField(
"Start Date",
null=True,
blank=True,
help_text="Enter the start date of the project",
)
end_date = models.DateField(
"End Date",
null=True,
blank=True,
help_text="Enter the end date of the project",
)
note = models.TextField(
"Notes",
null=True,
blank=True,
help_text="Provide additional information about the project role and assignment",
)
# Foreign keys
operator = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
null=True,
blank=True,
help_text="Select a user to assign to this project",
)
project = models.ForeignKey(Project, on_delete=models.CASCADE, null=False)
role = models.ForeignKey(
ProjectRole,
on_delete=models.PROTECT,
null=True,
blank=True,
help_text="Select a role that best describes the selected user's role in this project",
)
class Meta:
ordering = ["project", "start_date", "operator"]
verbose_name = "Project assignment"
verbose_name_plural = "Project assignments"
def get_absolute_url(self):
return reverse("rolodex:project_detail", args=[str(self.project.id)])
def __str__(self):
return f"{self.operator} - {self.project} {self.end_date})"
class ObjectiveStatus(models.Model):
"""
Stores an individual objective status.
"""
objective_status = models.CharField(
"Objective Status",
max_length=255,
unique=True,
help_text="Objective's status",
)
class Meta:
ordering = ["objective_status"]
verbose_name = "Objective status"
verbose_name_plural = "Objective status"
def __str__(self):
return self.objective_status
class ObjectivePriority(models.Model):
"""
Stores an individual objective priority category.
"""
weight = models.IntegerField(
"Priority Weight",
default=1,
help_text="Weight for sorting this priority when viewing objectives (lower numbers are higher priority)",
)
priority = models.CharField(
"Objective Priority",
max_length=255,
unique=True,
help_text="Objective's priority",
)
class Meta:
ordering = ["weight", "priority"]
verbose_name = "Objective priority"
verbose_name_plural = "Objective priorities"
def __str__(self):
return self.priority
class ProjectObjective(models.Model):
"""
Stores an individual project objective, related to an individual :model:`rolodex.Project`
and :model:`rolodex.ObjectiveStatus`.
"""
def get_status(): # pragma: no cover
"""Get the default status for the status field."""
try:
active_status = ObjectiveStatus.objects.get(objective_status="Active")
return active_status.id
except ObjectiveStatus.DoesNotExist:
return 1
objective = models.CharField(
"Objective",
max_length=255,
null=True,
blank=True,
help_text="Provide a high-level objective – add sub-tasks later for planning or as you discover obstacles",
)
description = models.TextField(
"Description",
null=True,
blank=True,
help_text="Provide a more detailed description, purpose, or context",
)
complete = models.BooleanField("Completed", default=False, help_text="Mark the objective as complete")
deadline = models.DateField(
"Due Date",
max_length=12,
null=True,
blank=True,
help_text="Objective's deadline/due date",
)
marked_complete = models.DateField(
"Marked Complete",
null=True,
blank=True,
help_text="Date the objective was marked complete",
)
position = models.IntegerField(
"List Position",
default=1,
)
# Foreign Keys
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
null=False,
)
status = models.ForeignKey(
ObjectiveStatus,
on_delete=models.PROTECT,
default=get_status,
help_text="Set the status for this objective",
)
priority = models.ForeignKey(
ObjectivePriority,
on_delete=models.PROTECT,
null=True,
help_text="Assign a priority category",
)
class Meta:
ordering = [
"project",
"position",
"complete",
"priority__weight",
"deadline",
"status",
"objective",
]
verbose_name = "Project objective"
verbose_name_plural = "Project objectives"
def __str__(self):
return f"{self.project} - {self.objective} {self.status})"
def calculate_status(self):
"""
Calculate and return a percentage complete estimate based on ``complete`` value
and any status of related :model:`ProjectSubTask` entries.
"""
total_tasks = self.projectsubtask_set.all().count()
completed_tasks = 0
if self.complete:
return 100.0
if total_tasks > 0:
for task in self.projectsubtask_set.all():
if task.complete:
completed_tasks += 1
return round(completed_tasks / total_tasks * 100, 1)
return 0
class ProjectSubTask(models.Model):
"""
Stores an individual sub-task, related to an individual :model:`rolodex.ProjectObjective`
and :model:`rolodex.ObjectiveStatus`.
"""
def get_status(): # pragma: no cover
"""Get the default status for the status field."""
try:
active_status = ObjectiveStatus.objects.get(objective_status="Active")
return active_status.id
except ObjectiveStatus.DoesNotExist:
return 1
task = models.TextField("Task", null=True, blank=True, help_text="Provide a concise objective")
complete = models.BooleanField("Completed", default=False, help_text="Mark the objective as complete")
deadline = models.DateField(
"Due Date",
max_length=12,
null=True,
blank=True,
help_text="Provide a deadline for this objective",
)
marked_complete = models.DateField(
"Marked Complete",
null=True,
blank=True,
help_text="Date the task was marked complete",
)
# Foreign Keys
parent = models.ForeignKey(ProjectObjective, on_delete=models.CASCADE, null=False)
status = models.ForeignKey(
ObjectiveStatus,
on_delete=models.PROTECT,
default=get_status,
help_text="Set the status for this objective",
)
class Meta:
ordering = ["parent", "complete", "deadline", "status", "task"]
verbose_name = "Objective sub-task"
verbose_name_plural = "Objective sub-tasks"
def __str__(self):
return f"{self.parent.project} : {self.task} ({self.status})"
class ClientNote(models.Model):
"""
Stores an individual note, related to an individual :model:`rolodex.Client` and :model:`users.User`.
"""
# This field is automatically filled with the current date
timestamp = models.DateField("Timestamp", auto_now_add=True, help_text="Creation timestamp")
note = models.TextField(
"Notes",
null=True,
blank=True,
help_text="Leave the client or related projects",
)
# Foreign Keys
client = models.ForeignKey(Client, on_delete=models.CASCADE, null=False)
operator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True)
class Meta:
ordering = ["client", "timestamp"]
verbose_name = "Client note"
verbose_name_plural = "Client notes"
def __str__(self):
return f"{self.client}: {self.timestamp} - {self.note}"
class ProjectNote(models.Model):
"""
Stores an individual note, related to :model:`rolodex.Project` and :model:`users.User`.
"""
# This field is automatically filled with the current date
timestamp = models.DateField("Timestamp", auto_now_add=True, help_text="Creation timestamp")
note = models.TextField(
"Notes",
null=True,
blank=True,
help_text="Leave a note about the project or related client",
)
# Foreign Keys
project = models.ForeignKey(Project, on_delete=models.CASCADE, null=False)
operator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True)
class Meta:
ordering = ["project", "timestamp"]
verbose_name = "Project note"
verbose_name_plural = "Project notes"
def __str__(self):
return f"{self.project}: {self.timestamp} - {self.note}"
class ProjectScope(models.Model):
"""
Stores an individual scope list, related to an individual :model:`rolodex.Project`.
"""
name = models.CharField(
"Scope Name",
max_length=255,
null=True,
blank=True,
help_text="Provide a descriptive name for this list (e.g., External IPs, Cardholder Data Environment)",
)
scope = models.TextField(
"Scope",
null=True,
blank=True,
help_text="Provide a list of IP addresses, ranges, hostnames, or a mix with each entry on a new line",
)
description = models.TextField(
"Description",
null=True,
blank=True,
help_text="Provide a brief description of this list",
)
disallowed = models.BooleanField(
"Disallowed",
default=False,
help_text="Flag this list as off limits / not to be touched",
)
requires_caution = models.BooleanField(
"Requires Caution",
default=False,
help_text="Flag this list as requiring caution or prior warning before testing",
)
# Foreign Keys
project = models.ForeignKey(Project, on_delete=models.CASCADE, null=False)
class Meta:
ordering = ["project", "name"]
verbose_name = "Project scope list"
verbose_name_plural = "Project scope lists"
def __str__(self):
return f"{self.project}: {self.name}"
def count_lines(self):
"""Returns the number of lines in the scope list."""
return len(self.scope.splitlines())
def count_lines_str(self):
"""Returns the number of lines in the scope list as a string."""
count = len(self.scope.splitlines())
if count > 1:
return f"{count} Lines"
return f"{count} Line"
class ProjectTarget(models.Model):
"""
Stores an individual target host, related to an individual :model:`rolodex.Project`.
"""
ip_address = models.CharField(
"IP Address",
max_length=45,
null=True,
blank=True,
validators=[validate_ip_range],
help_text="Enter the IP address or range of the target host(s)",
)
hostname = models.CharField(
"Hostname / FQDN",
max_length=255,
null=True,
blank=True,
help_text="Provide the target's hostname, fully qualified domain name, or other identifier",
)
note = models.TextField(
"Notes",
null=True,
blank=True,
help_text="Provide additional information about the target(s) or the environment",
)
compromised = models.BooleanField("Compromised", default=False, help_text="Flag this target as compromised")
# Foreign Keys
project = models.ForeignKey(Project, on_delete=models.CASCADE, null=False)
class Meta:
ordering = ["project", "compromised", "ip_address", "hostname"]
verbose_name = "Project target"
verbose_name_plural = "Project targets"
def __str__(self):
return f"{self.hostname} ({self.ip_address})"
class ClientInvite(models.Model):
"""
Links an individual :model:`users.User` to a :model:`rolodex.Client` to
which they have been granted access.
"""
comment = models.TextField(
"Comment",
null=True,
blank=True,
help_text="Optional explanation for this invite",
)
# Foreign Keys
client = models.ForeignKey(Client, on_delete=models.CASCADE, null=False)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=False)
class Meta:
ordering = ["client_id", "user_id"]
verbose_name = "Client invite"
verbose_name_plural = "Client invites"
def __str__(self):
return f"{self.user} ({self.client})"
class ProjectInvite(models.Model):
"""
Links an individual :model:`users.User` to a :model:`rolodex.Project` to
which they have been granted access.
"""
comment = models.TextField(
"Comment",
null=True,
blank=True,
help_text="Optional explanation for this invite",
)
# Foreign Keys
project = models.ForeignKey(Project, on_delete=models.CASCADE, null=False)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=False)
class Meta:
ordering = ["project_id", "user_id"]
verbose_name = "Project invite"
verbose_name_plural = "Project invites"
def __str__(self):
return f"{self.user} ({self.project})"
class DeconflictionStatus(models.Model):
"""
Stores an individual deconfliction status.
"""
status = models.CharField(
"Status",
max_length=255,
unique=True,
help_text="Status for a deconfliction request (e.g., Undetermined, Confirmed, Unrelated)",
)
weight = models.IntegerField(
"Status Weight",
default=1,
help_text="Weight for sorting status",
)
class Meta:
ordering = ["weight", "status"]
verbose_name = "Deconfliction status"
verbose_name_plural = "Deconfliction status"
def __str__(self):
return f"{self.status}"
class Deconfliction(models.Model):
"""
Stores an individual deconfliction, related to an individual :model:`rolodex.Project`.
"""
created_at = models.DateTimeField(
"Timestamp",
auto_now_add=True,
help_text="Date and time this deconfliction was created",
)
report_timestamp = models.DateTimeField(
"Report Timestamp",
help_text="Date and time the client informed you and requested deconfliction",
)
alert_timestamp = models.DateTimeField(
"Alert Timestamp",
null=True,
blank=True,
help_text="Date and time the alert fired",
)
response_timestamp = models.DateTimeField(
"Response Timestamp",
null=True,
blank=True,
help_text="Date and time you responded to the report",
)
title = models.CharField(
"Deconfliction Title",
max_length=255,
help_text="Provide a descriptive title or headline for this deconfliction",
)
description = models.TextField(
"Description",
null=True,
blank=True,
help_text="Provide a brief description of this deconfliction request",
)
alert_source = models.CharField(
"Alert Source",
max_length=255,
null=True,
blank=True,
help_text="Source of the alert (e.g., user reported, EDR, MDR, etc.)",
)
# Foreign Keys
project = models.ForeignKey(Project, on_delete=models.CASCADE, null=False)
status = models.ForeignKey(
"DeconflictionStatus",
on_delete=models.PROTECT,
null=True,
help_text="Select a status that best reflects the current state of this deconfliction (e.g., undetermined, confirmed assessment activity, or unrelated to assessment activity)",
)
class Meta:
ordering = ["project", "-created_at", "status__weight", "title"]
verbose_name = "Project deconfliction"
verbose_name_plural = "Project deconflictions"
@property
def log_entries(self):
"""Get log entries that precede the alert by one hour."""
logs = None
if self.alert_timestamp:
one_hour_ago = self.alert_timestamp - timedelta(hours=1)
logs = OplogEntry.objects.filter(
models.Q(oplog_id__project=self.project)
& models.Q(start_date__range=(one_hour_ago, self.alert_timestamp))
)
return logs
def __str__(self):
return f"{self.project}: {self.title}"
class WhiteCard(models.Model):
"""
Stores an individual white card, related to an individual :model:`rolodex.Project`.
"""
issued = models.DateTimeField(
"Issued",
blank=True,
null=True,
help_text="Date and time the client issued this white card",
)
title = models.CharField(
"Title",
max_length=255,
blank=True,
null=True,
help_text="Provide a descriptive headline for this white card (e.g., a username, hostname, or short sentence",
)
description = models.TextField(
"Description",
blank=True,
null=True,
help_text="Provide a brief description of this white card",
)
# Foreign Keys
project = models.ForeignKey(Project, on_delete=models.CASCADE, null=False)
class Meta:
ordering = ["project", "-issued", "title"]
verbose_name = "Project white card"
verbose_name_plural = "Project white cards"
def __str__(self):
return f"{self.project}: {self.title}"
|
67d52c44017973e85dceea72a88a4c178a3e97f8
|
1acb41c1157c8eb5c3988f24baa72bdbf697931c
|
/easyfsl/methods/few_shot_classifier.py
|
c77202b9afdaabace8e82e23e2cf98703a96913d
|
[
"MIT"
] |
permissive
|
sicara/easy-few-shot-learning
|
f2050a7dcacc9fcc1de7502f03fd3491cc175f69
|
509dd9aa60879ad76b687002dc5648c9b4f337b8
|
refs/heads/master
| 2023-09-01T19:24:10.217392
| 2023-09-01T08:59:43
| 2023-09-01T08:59:43
| 334,984,118
| 634
| 107
|
MIT
| 2023-09-11T15:23:48
| 2021-02-01T14:55:41
|
Python
|
UTF-8
|
Python
| false
| false
| 6,316
|
py
|
few_shot_classifier.py
|
from abc import abstractmethod
from typing import Optional
import torch
from torch import Tensor, nn
from easyfsl.methods.utils import compute_prototypes
class FewShotClassifier(nn.Module):
"""
Abstract class providing methods usable by all few-shot classification algorithms
"""
def __init__(
self,
backbone: Optional[nn.Module] = None,
use_softmax: bool = False,
feature_centering: Optional[Tensor] = None,
feature_normalization: Optional[float] = None,
):
"""
Initialize the Few-Shot Classifier
Args:
backbone: the feature extractor used by the method. Must output a tensor of the
appropriate shape (depending on the method).
If None is passed, the backbone will be initialized as nn.Identity().
use_softmax: whether to return predictions as soft probabilities
feature_centering: a features vector on which to center all computed features.
If None is passed, no centering is performed.
feature_normalization: a value by which to normalize all computed features after centering.
It is used as the p argument in torch.nn.functional.normalize().
If None is passed, no normalization is performed.
"""
super().__init__()
self.backbone = backbone if backbone is not None else nn.Identity()
self.use_softmax = use_softmax
self.prototypes = torch.tensor(())
self.support_features = torch.tensor(())
self.support_labels = torch.tensor(())
self.feature_centering = (
feature_centering if feature_centering is not None else torch.tensor(0)
)
self.feature_normalization = feature_normalization
@abstractmethod
def forward(
self,
query_images: Tensor,
) -> Tensor:
"""
Predict classification labels.
Args:
query_images: images of the query set of shape (n_query, **image_shape)
Returns:
a prediction of classification scores for query images of shape (n_query, n_classes)
"""
raise NotImplementedError(
"All few-shot algorithms must implement a forward method."
)
def process_support_set(
self,
support_images: Tensor,
support_labels: Tensor,
):
"""
Harness information from the support set, so that query labels can later be predicted using a forward call.
The default behaviour shared by most few-shot classifiers is to compute prototypes and store the support set.
Args:
support_images: images of the support set of shape (n_support, **image_shape)
support_labels: labels of support set images of shape (n_support, )
"""
self.compute_prototypes_and_store_support_set(support_images, support_labels)
@staticmethod
def is_transductive() -> bool:
raise NotImplementedError(
"All few-shot algorithms must implement a is_transductive method."
)
def compute_features(self, images: Tensor) -> Tensor:
"""
Compute features from images and perform centering and normalization.
Args:
images: images of shape (n_images, **image_shape)
Returns:
features of shape (n_images, feature_dimension)
"""
original_features = self.backbone(images)
centered_features = original_features - self.feature_centering
if self.feature_normalization is not None:
return nn.functional.normalize(
centered_features, p=self.feature_normalization, dim=1
)
return centered_features
def softmax_if_specified(self, output: Tensor, temperature: float = 1.0) -> Tensor:
"""
If the option is chosen when the classifier is initialized, we perform a softmax on the
output in order to return soft probabilities.
Args:
output: output of the forward method of shape (n_query, n_classes)
temperature: temperature of the softmax
Returns:
output as it was, or output as soft probabilities, of shape (n_query, n_classes)
"""
return (temperature * output).softmax(-1) if self.use_softmax else output
def l2_distance_to_prototypes(self, samples: Tensor) -> Tensor:
"""
Compute prediction logits from their euclidean distance to support set prototypes.
Args:
samples: features of the items to classify of shape (n_samples, feature_dimension)
Returns:
prediction logits of shape (n_samples, n_classes)
"""
return -torch.cdist(samples, self.prototypes)
def cosine_distance_to_prototypes(self, samples) -> Tensor:
"""
Compute prediction logits from their cosine distance to support set prototypes.
Args:
samples: features of the items to classify of shape (n_samples, feature_dimension)
Returns:
prediction logits of shape (n_samples, n_classes)
"""
return (
nn.functional.normalize(samples, dim=1)
@ nn.functional.normalize(self.prototypes, dim=1).T
)
def compute_prototypes_and_store_support_set(
self,
support_images: Tensor,
support_labels: Tensor,
):
"""
Extract support features, compute prototypes, and store support labels, features, and prototypes.
Args:
support_images: images of the support set of shape (n_support, **image_shape)
support_labels: labels of support set images of shape (n_support, )
"""
self.support_labels = support_labels
self.support_features = self.compute_features(support_images)
self._raise_error_if_features_are_multi_dimensional(self.support_features)
self.prototypes = compute_prototypes(self.support_features, support_labels)
@staticmethod
def _raise_error_if_features_are_multi_dimensional(features: Tensor):
if len(features.shape) != 2:
raise ValueError(
"Illegal backbone or feature shape. "
"Expected output for an image is a 1-dim tensor."
)
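# Minimal concrete subclass sketch (illustrative, not part of the original module). It
# classifies queries by negative L2 distance to the stored prototypes, using only the
# helpers defined on FewShotClassifier above.
class _ExamplePrototypeClassifier(FewShotClassifier):
    def forward(self, query_images: Tensor) -> Tensor:
        # Extract (centered/normalized) query features and score them against prototypes.
        query_features = self.compute_features(query_images)
        self._raise_error_if_features_are_multi_dimensional(query_features)
        return self.softmax_if_specified(self.l2_distance_to_prototypes(query_features))
    @staticmethod
    def is_transductive() -> bool:
        return False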
|
3e2c979887c2a2252f392ee8cc8034352a6cc9ed
|
3f39a707fdcf8bd0184cf044a6cb039e659738b7
|
/data_wizard/rest.py
|
54bef701f23499a390b4011c7430969d5d625232
|
[
"MIT"
] |
permissive
|
wq/django-data-wizard
|
b166882c163d19d3facb9cc648dc442ab8f64a51
|
2cf34acd96e6e6ad8eaff5f2fb5dee6dd94416d5
|
refs/heads/main
| 2023-07-20T03:59:38.137970
| 2023-07-13T21:33:40
| 2023-07-13T21:33:40
| 25,331,283
| 331
| 56
|
MIT
| 2023-06-08T15:57:41
| 2014-10-17T01:26:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,516
|
py
|
rest.py
|
from rest_framework import serializers
from wq.db import rest
from wq.db.rest.views import ModelViewSet
from wq.db.rest.serializers import ModelSerializer
from wq.db.rest.renderers import HTMLRenderer, JSONRenderer
from .models import Run
from . import views as wizard
from . import autodiscover
from rest_framework.settings import api_settings
autodiscover()
# wq.db-compatible serializers
class CurrentUserDefault(serializers.CurrentUserDefault):
def __call__(self, serializer=None):
if getattr(self, "requires_context", None):
# DRF 3.11+
user = super(CurrentUserDefault, self).__call__(serializer)
else:
# DRF 3.10 and earlier
user = super(CurrentUserDefault, self).__call__()
return user.pk
class RunSerializer(ModelSerializer, wizard.RunSerializer):
user_id = serializers.HiddenField(default=CurrentUserDefault())
class Meta:
exclude = ["content_type"]
class RecordSerializer(wizard.RecordSerializer):
def get_object_url(self, instance):
obj = instance.content_object
conf = rest.router.get_model_config(type(obj))
if not conf:
return None
base = rest.router.get_base_url()
url = conf["url"]
objid = getattr(obj, conf.get("lookup", "pk"))
return f"{base}/{url}/{objid}"
class RunViewSet(ModelViewSet, wizard.RunViewSet):
record_serializer_class = RecordSerializer
renderer_classes = [HTMLRenderer, JSONRenderer]
pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
_namespace = "wq"
# wq.db router registration
def user_filter(qs, request):
if request.user.is_authenticated:
return qs.filter(user=request.user)
else:
return qs.none()
rest.router.register_model(
Run,
serializer=RunSerializer,
viewset=RunViewSet,
url="datawizard",
modes=["list", "detail", "edit"]
+ [
action.url_path
for action in RunViewSet.get_extra_actions()
if "get" in action.mapping
and action.url_path not in ("edit", "status")
],
background_sync=False,
postsave=(
"datawizard/{{id}}"
"{{#current_mode}}/{{current_mode}}{{/current_mode}}"
"{{#task_id}}?task={{task_id}}{{/task_id}}"
),
fields="__all__",
filter=user_filter,
cache="none",
show_in_index="can_change",
section="Data Wizard",
order=210,
icon="list",
)
|
fb32eb55f45b7a38298616c8a66991e156a29496
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/dashboard/dashboard/pinpoint/models/quest/quest_test.py
|
6a9d645a1e3a91b3cc84bbbf13c1d3d4828d6b2c
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
quest_test.py
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import itertools
import unittest
from dashboard.pinpoint.models.quest import execution_test
from dashboard.pinpoint.models.quest import quest
class _QuestStub(quest.Quest):
def __str__(self):
return 'Quest'
@classmethod
def FromDict(cls, arguments):
return cls()
class QuestCycle(_QuestStub):
"""Cycles through the given Quest classes."""
def __init__(self, *quests):
"""Creates a QuestCycle.
Args:
      quests: An iterable of Quest instances.
"""
self._execution_classes = itertools.cycle(quests)
def Start(self, change):
return next(self._execution_classes).Start(change)
class QuestByChange(_QuestStub):
"""Uses a different Quest for each Change."""
def __init__(self, change_mapping):
"""Creates a QuestByChange.
Args:
change_mapping: A dict mapping each Change to
the Quest instance to use for that Change.
"""
self._change_mapping = change_mapping
def Start(self, change):
return self._change_mapping[change].Start(change)
class QuestException(_QuestStub):
def Start(self, change):
del change
return execution_test.ExecutionException()
class QuestFail(_QuestStub):
def Start(self, change):
del change
return execution_test.ExecutionFail()
class QuestFail2(_QuestStub):
def Start(self, change):
del change
return execution_test.ExecutionFail2()
class QuestPass(_QuestStub):
def Start(self, change):
del change
return execution_test.ExecutionPass()
class QuestSpin(_QuestStub):
def Start(self, change):
del change
return execution_test.ExecutionSpin()
class QuestTest(unittest.TestCase):
"""Unit tests for quest.py, for coverage."""
def testQuest(self):
with self.assertRaises(NotImplementedError):
str(quest.Quest())
with self.assertRaises(AttributeError):
quest.Quest().Start()
with self.assertRaises(NotImplementedError):
quest.Quest.FromDict({})
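# Illustrative test (not part of the original module) exercising only the stub quests
# defined above: QuestCycle alternates between the quests it was given.
class QuestCycleExampleTest(unittest.TestCase):
    def testCyclesThroughQuests(self):
        cycling_quest = QuestCycle(QuestPass(), QuestFail())
        self.assertIsInstance(cycling_quest.Start('change'),
                              execution_test.ExecutionPass)
        self.assertIsInstance(cycling_quest.Start('change'),
                              execution_test.ExecutionFail)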
|
725927c2edad24c9d6dce70896bac31c77ac51f2
|
a411a55762de11dc2c9d913ff33d2f1477ac02cf
|
/dp/cloud/python/magma/radio_controller/tests/test_utils/db_cbsd_builder.py
|
5bd80362b624b265ddfc5b83b59fde95e66698d0
|
[
"BSD-3-Clause"
] |
permissive
|
magma/magma
|
0dc48c1513d9968bd05fb7589f302c192b7c0f94
|
0e1d895dfe625681229e181fbc2dbad83e13c5cb
|
refs/heads/master
| 2023-09-04T09:31:56.140395
| 2023-08-29T13:54:49
| 2023-08-29T13:54:49
| 170,803,235
| 1,219
| 525
|
NOASSERTION
| 2023-09-07T17:45:42
| 2019-02-15T04:46:24
|
C++
|
UTF-8
|
Python
| false
| false
| 5,259
|
py
|
db_cbsd_builder.py
|
"""
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import json
from datetime import datetime
from typing import List
from magma.db_service.models import DBCbsd, DBGrant, DBRequest
class DBCbsdBuilder:
def __init__(self):
self.cbsd = DBCbsd()
def build(self) -> DBCbsd:
return self.cbsd
def deleted(self):
self.cbsd.is_deleted = True
return self
def updated(self):
self.cbsd.should_deregister = True
return self
def relinquished(self):
self.cbsd.should_relinquish = True
return self
def with_id(self, db_id: int) -> DBCbsdBuilder:
self.cbsd.id = db_id
return self
def with_state(self, state_id: int) -> DBCbsdBuilder:
self.cbsd.state_id = state_id
return self
def with_registration(self, prefix: str) -> DBCbsdBuilder:
self.cbsd.cbsd_id = f'{prefix}_cbsd_id'
self.cbsd.user_id = f'{prefix}_user_id'
self.cbsd.fcc_id = f'{prefix}_fcc_id'
self.cbsd.cbsd_serial_number = f'{prefix}_serial_number'
return self
def with_eirp_capabilities(
self,
min_power: float, max_power: float,
no_ports: int,
) -> DBCbsdBuilder:
self.cbsd.min_power = min_power
self.cbsd.max_power = max_power
self.cbsd.number_of_ports = no_ports
return self
def with_single_step_enabled(self) -> DBCbsdBuilder:
self.cbsd.single_step_enabled = True
return self
def with_category(self, category: str) -> DBCbsdBuilder:
self.cbsd.cbsd_category = category
return self
def with_antenna_gain(
self,
antenna_gain_dbi: float,
) -> DBCbsdBuilder:
self.cbsd.antenna_gain = antenna_gain_dbi
return self
def with_installation_params(
self,
latitude_deg: float,
longitude_deg: float,
height_m: float,
height_type: str,
indoor_deployment: bool,
) -> DBCbsdBuilder:
self.cbsd.latitude_deg = latitude_deg
self.cbsd.longitude_deg = longitude_deg
self.cbsd.height_m = height_m
self.cbsd.height_type = height_type
self.cbsd.indoor_deployment = indoor_deployment
return self
def with_last_seen(self, last_seen: int) -> DBCbsdBuilder:
self.cbsd.last_seen = datetime.fromtimestamp(last_seen)
return self
def with_desired_state(self, desired_state_id: int) -> DBCbsdBuilder:
self.cbsd.desired_state_id = desired_state_id
return self
def with_preferences(self, bandwidth_mhz: int, frequencies_mhz: List[int]) -> DBCbsdBuilder:
self.cbsd.preferred_bandwidth_mhz = bandwidth_mhz
self.cbsd.preferred_frequencies_mhz = frequencies_mhz
return self
def with_available_frequencies(self, frequencies: List[int]):
self.cbsd.available_frequencies = frequencies
return self
def with_carrier_aggregation(self, enabled: bool) -> DBCbsdBuilder:
self.cbsd.carrier_aggregation_enabled = enabled
return self
def with_max_ibw(self, max_ibw_mhz: int) -> DBCbsdBuilder:
self.cbsd.max_ibw_mhz = max_ibw_mhz
return self
def with_grant_redundancy(self, enabled: bool) -> DBCbsdBuilder:
self.cbsd.grant_redundancy = enabled
return self
def with_grant(
self,
grant_id: str,
state_id: int,
hb_interval_sec: int,
last_hb_timestamp: int = None,
low_frequency: int = 3500,
high_frequency: int = 3700,
) -> DBCbsdBuilder:
last_hb_time = datetime.fromtimestamp(
last_hb_timestamp,
) if last_hb_timestamp else None
grant = DBGrant(
grant_id=grant_id,
state_id=state_id,
heartbeat_interval=hb_interval_sec,
last_heartbeat_request_time=last_hb_time,
low_frequency=low_frequency,
high_frequency=high_frequency,
max_eirp=0,
)
self.cbsd.grants.append(grant)
return self
def with_channel(
self,
low: int, high: int,
max_eirp: float = None,
) -> DBCbsdBuilder:
if not self.cbsd.channels:
# Default is set on commit, so it might be None at this point.
self.cbsd.channels = []
channel = {
"low_frequency": low,
"high_frequency": high,
"max_eirp": max_eirp,
}
self.cbsd.channels = self.cbsd.channels + [channel]
return self
def with_request(self, type_id: int, payload: str) -> DBCbsdBuilder:
request = DBRequest(
type_id=type_id,
payload=json.loads(payload),
)
self.cbsd.requests.append(request)
return self
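# Illustrative builder usage (not part of the original test utilities); the chained calls
# only use methods defined above, and the argument values are arbitrary examples.
def _example_cbsd() -> DBCbsd:
    return (
        DBCbsdBuilder()
        .with_registration("some")
        .with_eirp_capabilities(min_power=0, max_power=20, no_ports=2)
        .with_grant(grant_id="some_grant_id", state_id=1, hb_interval_sec=60)
        .build()
    )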
|
331a1a4786878f63be69e46b2516942168ebfdf4
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/FiltersAndTransformers/Scripts/ParseHTMLTables/ParseHTMLTables.py
|
5370d7be91784154e9a9126394a4eb67a54cf86e
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 12,102
|
py
|
ParseHTMLTables.py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import copy
from typing import Any, Dict, Generator, List, Optional, Tuple, Union
from bs4 import BeautifulSoup, NavigableString, Tag
TITLE_THRESHOLD = 4
class Table:
def __init__(self, title: str):
self.__title = title
self.__headers: List[str] = []
self.__rows: List[Tuple[List[str], List[str]]] = []
self.__rowspan_labels: List[Tuple[int, str]] = []
def __set_rowspan_labels(self, columns: Optional[List[Tag]]):
if not columns or not any(col.attrs.get('rowspan') for col in columns):
return
rowspan_labels: List[Tuple[int, str]] = []
for col in columns:
try:
rowspan = int(col.attrs.get('rowspan') or 1)
except Exception:
rowspan = 1
rowspan = max(1, rowspan)
try:
colspan = int(col.attrs.get('colspan') or 1)
except Exception:
colspan = 1
colspan = max(1, colspan)
rowspan_labels += [(rowspan, col.text.strip())] * colspan
self.__rowspan_labels = rowspan_labels
def get_title(self) -> str:
return self.__title
def set_header_labels(self, headers: List[Tag]):
self.__headers = [header.text.strip() for header in headers]
def get_header_labels(self) -> List[str]:
return self.__headers
def add_row(self, columns: List[Tag], labels: Optional[List[Tag]] = None):
"""
Add a row with cells and labels.
:param columns: List of data cells of the row.
:param labels: List of header cells of the row.
"""
rowspan_labels = self.__rowspan_labels
# Normalize labels
if labels and any(label.attrs.get('rowspan') for label in labels):
self.__set_rowspan_labels(labels)
normalized_labels = []
if labels:
for i, (count, label) in enumerate(rowspan_labels):
if count >= 2:
normalized_labels.append(label)
for label in labels:
try:
colspan = int(label.attrs.get('colspan') or 1)
except Exception:
colspan = 1
normalized_labels += [label.text.strip()] * max(1, colspan)
# Normalize columns
if any(col.attrs.get('rowspan') for col in columns):
self.__set_rowspan_labels(columns)
normalized_columns = []
for i, (count, label) in enumerate(rowspan_labels):
if count >= 2:
normalized_columns.append(label)
rowspan_labels[i] = count - 1, label
for col in columns:
try:
colspan = int(col.attrs.get('colspan') or 1)
except Exception:
colspan = 1
normalized_columns += [col.text.strip()] * max(1, colspan)
self.__rows.append((normalized_labels, normalized_columns))
def get_rows(self) -> List[Tuple[List[str], List[str]]]:
return self.__rows
def make_pretty_table_rows(self, default_header_line: Optional[str] = None) -> Any:
"""
Format a table
:param default_header_line: Which table line handles as header by default, 'first_column' or 'first_row'
:return: The table formatted in JSON structure.
"""
rows: List[Union[str, Dict[str, Any]]] = []
temp_row: Dict[str, Any] = {}
tbl_rows = self.__rows
headers = self.__headers
if default_header_line and default_header_line != 'none':
if not headers and not any(labels for labels, cols in tbl_rows):
if default_header_line in ('first_column', 'first_row'):
# The first column or row is considered as header
if default_header_line == 'first_column':
# transpose
tbl_rows = [([], list(cols)) for cols in zip(*[cols for labels, cols in tbl_rows])]
labels, headers = tbl_rows[0]
tbl_rows = tbl_rows[1:]
else:
raise ValueError(f'Unknown default header line: {default_header_line}')
for labels, cols in tbl_rows:
labels = labels[-1:]
headers = labels + headers[len(labels):len(headers) - len(labels)]
if not cols:
continue
elif len(cols) == 1:
if len(headers) >= 1:
                    # If there is 1 header and 1 column, treat as key-value
key = headers[0]
vals = temp_row.get(key)
if vals is None:
temp_row[key] = cols[0]
elif type(vals) == list:
temp_row[key] = vals + [cols[0]]
else:
temp_row[key] = [vals, cols[0]]
else:
if temp_row:
rows.append(temp_row)
temp_row = {}
# Single value in a table - just create an array of strings
rows.append(cols[0])
elif len(cols) == 2 and len(headers) == 0:
# If there are 2 columns and no headers, treat as key-value
key = cols[0]
vals = temp_row.get(key)
if vals is None:
temp_row[key] = cols[1]
elif type(vals) == list:
temp_row[key] = vals + [cols[1]]
else:
temp_row[key] = [vals, cols[1]]
else:
if temp_row:
rows.append(temp_row)
temp_row = {}
rows.append({headers[i] if i < len(headers) else 'cell' + str(i): col for i, col in enumerate(cols)})
if temp_row:
rows.append(temp_row)
if len(rows) == 1 and type(rows[0]) == dict:
return rows[0]
return rows
def find_table_title(base: Optional[Union[BeautifulSoup, Tag, NavigableString]],
node: Union[BeautifulSoup, Tag, NavigableString]) -> Optional[str]:
"""
Search for a table title from a node.
:param base: The top node of the tree.
:param node: The node from which searching starts.
:return: A title found.
"""
title = ''
orig = node
prev = node.previous_element
while prev and node is not base:
node = prev
if isinstance(node, Tag) and node.name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
title = ' '.join(node.text.strip().split())
break
prev = node.previous_element
if not title or title.count(' ') >= TITLE_THRESHOLD:
message = ''
node = orig
prev = node.previous_element
while prev and node is not base:
node = prev
if isinstance(node, NavigableString):
message = (str(node) if message else str(node).rstrip()) + message
if message.lstrip() and any(c in message for c in ('\n', '\r')):
break
prev = node.previous_element
message = ' '.join(message.strip().split())
title = title if title and message.count(' ') >= title.count(' ') else message
return title
def list_columns(node: Union[BeautifulSoup, Tag, NavigableString], name: str) -> List[Tag]:
"""
List columns of the row.
:param node: The node which contains columns of the row.
:param name: The name of the tag of columns.
:return: The list of columns.
"""
vals = []
ancestor = node
name_list = ['table', 'td', 'th', name]
node = node.find(name_list)
while node and is_descendant(ancestor, node):
if node.name in name_list:
if node.name == name:
tnode = copy.copy(node)
for t in tnode.find_all('table'):
t.decompose()
vals.append(tnode)
node = node.find_next_sibling(True)
else:
node = node.find_next(name_list)
return vals
def is_descendant(ancestor: Optional[Union[BeautifulSoup, Tag, NavigableString]],
node: Optional[Union[BeautifulSoup, Tag, NavigableString]]) -> bool:
"""
Check if a node is descendant in the tree.
:param ancestor: The ancestor node.
:param node: The node to be checked.
:return: True - node is descendant, False - node is not descendant.
"""
return ancestor is not None and node is not None and any([ancestor is p for p in node.parents])
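# Illustrative sketch (added for clarity; not part of the original transformer): a quick check
# of is_descendant() on a tiny parsed fragment, relying on the script's existing BeautifulSoup
# import. Defined but never called.
def _demo_is_descendant():
    soup = BeautifulSoup('<div><p>hi</p></div>', 'html.parser')
    assert is_descendant(soup.div, soup.p)
    assert not is_descendant(soup.p, soup.div)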
def parse_table(base: Optional[Union[BeautifulSoup, Tag, NavigableString]],
table_node: Union[BeautifulSoup, Tag, NavigableString]) -> Generator[Table, None, None]:
"""
Parse an HTML table and enumerate the tables found within it.
:param base: The top node of the HTML tree.
:param table_node: The table node to parse.
:return: Tables found.
"""
table = Table(title=find_table_title(base, table_node) or 'No Title')
has_nested_tables = False
node = table_node.find(['table', 'tr'])
while node and is_descendant(table_node, node):
if node.name == 'tr':
ths = list_columns(node, 'th')
tds = list_columns(node, 'td')
if tds:
table.add_row(columns=tds, labels=ths)
if ths and not table.get_header_labels():
table.set_header_labels(ths)
node = node.find_next(['table', 'tr'])
elif node.name == 'table':
has_nested_tables = True
yield from parse_table(base, node)
base = node.previous_element
node = node.find_next_sibling(True)
else:
node = node.find_next(['table', 'tr'])
# Don't emit a table if its rows only contained nested tables
has_table = True
if has_nested_tables:
rows = table.get_rows()
if len(rows) == 1:
labels, cols = rows[0]
if len(cols) == 1 and not cols[0]:
has_table = False
if has_table:
yield table
def parse_tables(node: Union[BeautifulSoup, Tag, NavigableString]) -> Generator[Table, None, None]:
"""
Parse HTML tables and enumerate them.
:param node: The node from which searching starts.
:return: Tables found.
"""
base = None
node = node.find('table')
while node:
yield from parse_table(base, node)
base = node.next_sibling
while node:
next = node.find_next_sibling(True)
if next:
if next.name == 'table':
break
next = next.find_next('table')
if next:
break
node = node.parent
node = next
def main():
args = demisto.args()
html = args.get('value') or ''
overwriting_title = args.get('title')
filter_indexes = argToList(args.get('filter_indexes'))
filter_titles = argToList(args.get('filter_titles'))
default_header_line = args.get('default_header_line') or 'none'
tables = []
try:
soup = BeautifulSoup(html, 'html.parser')
index = -1
for table in parse_tables(soup):
rows = table.make_pretty_table_rows(default_header_line)
if not rows:
continue
index = index + 1
if filter_indexes and\
index not in filter_indexes and\
str(index) not in filter_indexes:
continue
original_title = table.get_title()
if filter_titles and original_title not in filter_titles:
continue
tables.append({overwriting_title or original_title: rows})
except Exception as err:
# Don't report errors via return_error() since this script runs as a transformer.
raise DemistoException(str(err))
return_results(tables)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
d2fa12d718ae10b7abdc0e8f5b8c77b06fb2e4d8
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/codeInsight/mlcompletion/receiverMatchesSimple.py
|
88297749105971b3715df9f1441cd5e9f844e396
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
receiverMatchesSimple.py
|
def foo(someParam: int):
pass
someParam = 1
foo(<caret>)
|
ec882c956a8039f40be96c4220ccd13455c3b937
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-libdispatch/PyObjCTest/test_workgroup_interval.py
|
34e58a78000011dc7f5d43c95680174a35f607d1
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
test_workgroup_interval.py
|
import dispatch
import objc
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestWorkgroupIntervalAPI(TestCase):
@min_os_level("11.0")
def test_functions(self):
self.assertResultHasType(dispatch.os_workgroup_interval_start, objc._C_INT)
self.assertArgHasType(dispatch.os_workgroup_interval_start, 0, objc._C_ID)
self.assertArgHasType(dispatch.os_workgroup_interval_start, 1, objc._C_ULNG_LNG)
self.assertArgHasType(dispatch.os_workgroup_interval_start, 2, objc._C_ULNG_LNG)
self.assertArgHasType(dispatch.os_workgroup_interval_start, 3, objc._C_ID)
self.assertResultHasType(dispatch.os_workgroup_interval_update, objc._C_INT)
self.assertArgHasType(dispatch.os_workgroup_interval_update, 0, objc._C_ID)
self.assertArgHasType(
dispatch.os_workgroup_interval_update, 1, objc._C_ULNG_LNG
)
self.assertArgHasType(dispatch.os_workgroup_interval_update, 2, objc._C_ID)
self.assertResultHasType(dispatch.os_workgroup_interval_finish, objc._C_INT)
self.assertArgHasType(dispatch.os_workgroup_interval_finish, 0, objc._C_ID)
self.assertArgHasType(dispatch.os_workgroup_interval_finish, 1, objc._C_ID)
|
4de6f1ade8c0d8fbcbad918c8a3ea58253da7b31
|
767b09cdf51803d533ebb5906042ed1f92f91a7c
|
/tests/structured_prediction/models/semantic_role_labeling_test.py
|
1eb998d97d49251a36f11a9b4cb1deecf08daa5c
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp-models
|
e93bb3b084e99e211d5ebb515b765de117e41970
|
b1f372248c17ad12684d344955fbcd98e957e77e
|
refs/heads/main
| 2023-09-05T01:57:37.434101
| 2022-11-24T00:06:05
| 2022-11-24T00:06:05
| 246,170,605
| 520
| 172
|
Apache-2.0
| 2022-11-24T00:06:06
| 2020-03-10T00:22:21
|
Python
|
UTF-8
|
Python
| false
| false
| 4,553
|
py
|
semantic_role_labeling_test.py
|
import subprocess
import os
from flaky import flaky
import pytest
import numpy
from allennlp.common.testing import ModelTestCase
from allennlp.common.params import Params
from allennlp.common.checks import ConfigurationError
from allennlp.models import Model
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.data.dataset_readers.dataset_utils.span_utils import to_bioul
from tests import FIXTURES_ROOT, PROJECT_ROOT
class SemanticRoleLabelerTest(ModelTestCase):
def setup_method(self):
super().setup_method()
self.set_up_model(
FIXTURES_ROOT / "structured_prediction" / "srl" / "experiment.json",
FIXTURES_ROOT / "structured_prediction" / "srl" / "conll_2012",
)
def test_srl_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
@flaky
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
class_probs = output_dict["class_probabilities"][0].data.numpy()
numpy.testing.assert_almost_equal(
numpy.sum(class_probs, -1), numpy.ones(class_probs.shape[0]), decimal=6
)
def test_decode_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.make_output_human_readable(output_dict)
lengths = get_lengths_from_binary_sequence_mask(decode_output_dict["mask"]).data.tolist()
# Hard to check anything concrete which we haven't checked in the above
# test, so we'll just check that each tag sequence has the same length as
# its individual instance, rather than the max length.
for prediction, length in zip(decode_output_dict["tags"], lengths):
assert len(prediction) == length
# Checks that the output is a well formed BIO sequence,
# as otherwise an exception is thrown.
to_bioul(prediction, encoding="BIO")
def test_bio_tags_correctly_convert_to_conll_format(self):
bio_tags = ["B-ARG-1", "I-ARG-1", "O", "B-V", "B-ARGM-ADJ", "O"]
from allennlp_models.structured_prediction.models.srl import (
convert_bio_tags_to_conll_format,
)
conll_tags = convert_bio_tags_to_conll_format(bio_tags)
assert conll_tags == ["(ARG-1*", "*)", "*", "(V*)", "(ARGM-ADJ*)", "*"]
def test_perl_eval_script_can_run_on_printed_conll_files(self):
bio_tags = ["B-ARG-1", "I-ARG-1", "O", "B-V", "B-ARGM-ADJ", "O"]
sentence = ["Mark", "and", "Matt", "were", "running", "fast", "."]
gold_file_path = os.path.join(self.TEST_DIR, "gold_conll_eval.txt")
prediction_file_path = os.path.join(self.TEST_DIR, "prediction_conll_eval.txt")
with open(gold_file_path, "a+") as gold_file, open(
prediction_file_path, "a+"
) as prediction_file:
# Use the same bio tags as prediction vs gold to make it obvious by looking
# at the perl script output if something is wrong. Write them twice to
# ensure that the perl script deals with multiple sentences.
from allennlp_models.structured_prediction.models.srl import (
write_bio_formatted_tags_to_file,
)
write_bio_formatted_tags_to_file(
gold_file, prediction_file, 4, sentence, bio_tags, bio_tags
)
write_bio_formatted_tags_to_file(
gold_file, prediction_file, 4, sentence, bio_tags, bio_tags
)
perl_script_command = [
"perl",
str(
PROJECT_ROOT / "allennlp_models" / "structured_prediction" / "tools" / "srl-eval.pl"
),
prediction_file_path,
gold_file_path,
]
exit_code = subprocess.check_call(perl_script_command)
assert exit_code == 0
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the phrase layer wrong - it should be 150 to match
# the embedding + binary feature dimensions.
params["model"]["encoder"]["input_size"] = 10
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
|
3069bfc1e38881102b4489d2fd0577154faa72d5
|
c475cd8531a94ffae69cc92371d41531dbbddb6c
|
/Projects/bullet3-2.89/examples/pybullet/unittests/utils.py
|
b6ef61d41a6dda1c92f71223e3c201290092f417
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"Zlib"
] |
permissive
|
WolfireGames/overgrowth
|
72d3dd29cbd7254337265c29f8de3e5c32400114
|
594a2a4f9da0855304ee8cd5335d042f8e954ce1
|
refs/heads/main
| 2023-08-15T19:36:56.156578
| 2023-05-17T08:17:53
| 2023-05-17T08:20:36
| 467,448,492
| 2,264
| 245
|
Apache-2.0
| 2023-05-09T07:29:58
| 2022-03-08T09:38:54
|
C++
|
UTF-8
|
Python
| false
| false
| 285
|
py
|
utils.py
|
def dot(A, b):
"""Dot product between a 2D matrix and a 1D vector"""
return [sum([aij * bj for aij, bj in zip(ai, b)]) for ai in A]
def allclose(a, b, tol=1e-7):
"""Are all elements of a vector close to one another"""
return all([abs(ai - bi) < tol for ai, bi in zip(a, b)])
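# Illustrative sketch (added for clarity; not part of the original file): hand-computed
# checks for the two helpers above. Defined but never called.
def _demo():
    A = [[1, 0], [0, 2]]
    b = [3, 4]
    assert dot(A, b) == [3, 8]                      # row-wise dot products
    assert allclose([1.0, 2.0], [1.0, 2.0 + 1e-9])  # within the default tolerance
    assert not allclose([1.0], [1.1])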
|
f11ce038753c2e2c0e3138946eb0950d003bdc86
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/pydev/_pydevd_asyncio_util/pydevd_asyncio_utils.py
|
b3ed912401b0e8146487b79bc55260ea879f3058
|
[
"EPL-1.0",
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 5,475
|
py
|
pydevd_asyncio_utils.py
|
# Copyright 2000-2022 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
from _pydevd_bundle.pydevd_constants import IS_ASYNCIO_DEBUGGER_ENV, IS_ASYNCIO_REPL
from _pydevd_bundle.pydevd_exec2 import Exec
from _pydev_bundle.pydev_log import warn
eval_async_expression_in_context = None
eval_async_expression = None
exec_async_code = None
asyncio_command_compiler = None
if IS_ASYNCIO_DEBUGGER_ENV or IS_ASYNCIO_REPL:
from _pydevd_bundle import pydevd_save_locals
from _pydevd_asyncio_util.pydevd_nest_asyncio import apply, PyDevCoro
from codeop import CommandCompiler
import ast, types, inspect, asyncio
FILENAME = '<string>'
EVAL_SYMBOL = 'eval'
EXEC_SYMBOL = 'exec'
MODULE = '<module>'
_asyncio_command_compiler = CommandCompiler()
_asyncio_command_compiler.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
def _compile_async_expression(expression, do_exec):
"""
Compile an expression with 'eval' or 'exec' compilation flag.\n
Can compile an expression that uses `await` outside of a function.
:param str expression: compilation target
:param bool do_exec: if True then compilation flag is 'exec' else 'eval'
:return: a pair of compilation result and compilation flag
:raises (OverflowError, SyntaxError, ValueError): if compilation failed
"""
compilation_flag = EVAL_SYMBOL
if do_exec:
try:
compiled = asyncio_command_compiler(expression, FILENAME, compilation_flag)
except (OverflowError, SyntaxError, ValueError):
compilation_flag = EXEC_SYMBOL
compiled = asyncio_command_compiler(expression, FILENAME, compilation_flag)
else:
compiled = asyncio_command_compiler(expression, FILENAME, compilation_flag)
return compiled, compilation_flag
def _eval_async_expression_in_context(expression, global_names, local_names, do_exec):
"""
Compile an expression; if the compilation result is a coroutine, run it in an asyncio event loop, otherwise evaluate it.\n
Can evaluate an expression that uses `await` outside of a function.
:param str expression: evaluation target
:param global_names: the dictionary implementing the current module namespace
:param local_names: the dictionary representing the current local symbol table
:param bool do_exec: if True then the compilation flag is 'exec' else 'eval'
:return: evaluation result
:raises (OverflowError, SyntaxError, ValueError): if a compilation failed
"""
apply()
updated_globals = {}
updated_globals.update(global_names)
updated_globals.update(local_names)
compiled, _ = _compile_async_expression(expression, do_exec)
return exec_async_code(compiled, updated_globals)
def _eval_async_expression(expression, global_names, frame, do_exec, exception_handler):
"""
Compile an expression; if the compilation result is a coroutine, run it in an asyncio event loop, otherwise evaluate it.\n
Can evaluate an expression that uses `await` outside of a function.
:param str expression: evaluation target
:param global_names: the dictionary implementing the current module namespace and the current local symbol table
:param frame: the current frame
:param bool do_exec: if True then the compilation flag is 'exec' else 'eval'
:param exception_handler: handle an exception thrown at compile time
:return: evaluation result or exception string
"""
apply()
locals = frame.f_locals
try:
compiled, compilation_flag = _compile_async_expression(expression, do_exec)
if compiled is None:
try:
compile(expression, FILENAME, compilation_flag, asyncio_command_compiler.compiler.flags)
except (OverflowError, SyntaxError, ValueError):
return exception_handler(expression, locals)
result = exec_async_code(compiled, global_names)
if compilation_flag == EXEC_SYMBOL:
Exec(expression, global_names, frame.f_locals)
pydevd_save_locals.save_locals(frame)
return result
except (OverflowError, SyntaxError, ValueError):
return exception_handler(expression, locals)
def _exec_async_code(code, global_names):
"""
If the compiled code produces a coroutine, run it in an asyncio event loop; otherwise evaluate it directly
:param code: evaluation target
:param global_names: the dictionary implementing the current module namespace
:return: evaluation result
"""
try:
apply()
except:
warn('Failed to patch asyncio')
func = types.FunctionType(code, global_names)
result = func()
try:
if inspect.iscoroutine(result) and MODULE in str(result):
loop = asyncio.get_event_loop()
result = loop.run_until_complete(PyDevCoro(result))
except:
warn('Failed to run coroutine %s' % str(result))
finally:
return result
eval_async_expression_in_context = _eval_async_expression_in_context
eval_async_expression = _eval_async_expression
exec_async_code = _exec_async_code
asyncio_command_compiler = _asyncio_command_compiler
|
270ed6188babf984bdd0399b42723895ee13a524
|
48ca6f9f041a1e9f563500c8a7fa04dbb18fa949
|
/pygears/sim/extens/websim.py
|
c548767d9bc56a789b12e74d4c931124b1a66f0d
|
[
"MIT"
] |
permissive
|
bogdanvuk/pygears
|
71404e53d4689ec9cdd9db546bfc0f229a7e02da
|
705b11ab6de79868b25753fa9d0ce7128791b346
|
refs/heads/master
| 2023-07-08T11:38:54.625172
| 2022-03-07T12:29:00
| 2022-03-07T12:29:00
| 124,890,922
| 146
| 16
|
MIT
| 2022-08-15T07:57:08
| 2018-03-12T13:10:06
|
Python
|
UTF-8
|
Python
| false
| false
| 24,032
|
py
|
websim.py
|
from enum import IntEnum
import time
import multiprocessing
from pygears.core.hier_node import HierYielderBase
from pygears.conf import inject, Inject
import os
from pygears.typing import typeof
from pygears import find, reg
from pygears.hdl import HDLPlugin
from pygears.hdl.sv.intf import SVIntfGen
from pygears.core.gear import Gear, Intf
from pygears.core.port import HDLConsumer, OutPort, Port, InPort
from pygears.sim import timestep
from .sim_extend import SimExtend
from pygears.core.hier_node import HierVisitorBase
from pygears.sim.modules import SimVerilated, SimSocket
multiprocessing.set_start_method('fork')
VALUE = set(('0', '1', 'x', 'X', 'z', 'Z'))
INVALID_VALUE = set(('x', 'X', 'z', 'Z'))
VECTOR_VALUE_CHANGE = set(('b', 'B', 'r', 'R'))
VALUE_CHANGE = VALUE | VECTOR_VALUE_CHANGE
@inject
def find_cosim_modules(top=Inject('gear/root')):
class CosimVisitor(HierVisitorBase):
@inject
def __init__(self, sim_map=Inject('sim/map')):
self.sim_map = sim_map
self.cosim_modules = []
def Gear(self, module):
if isinstance(self.sim_map.get(module, None), SimVerilated):
self.cosim_modules.append(self.sim_map[module])
return True
v = CosimVisitor()
v.visit(top)
return v.cosim_modules
class ChannelState(IntEnum):
Invalid = 0
NotReady = 1
Ready = 2
Done = 3
Awaiting = 4
import functools
@functools.lru_cache(maxsize=None)
def subtypes(dtype):
return tuple((t, t.width) for t in dtype)
def split_coded_dtype(t, val):
for subt, subt_width in t:
subt_mask = (1 << subt_width) - 1
yield val & subt_mask
val >>= subt_width
def split_coded_change(t, val1, val2):
for subt, subt_width in subtypes(t):
subt_mask = (1 << subt_width) - 1
subval1 = val1 & subt_mask
val1 >>= subt_width
if val2 is not None:
subval2 = val2 & subt_mask
val2 >>= subt_width
else:
subval2 = None
yield subt, subval1, subval2
def create_data_change(t, val, prev_val):
from pygears.typing import Queue, Array, Tuple, Union
is_changed = prev_val is None and val is not None or val != prev_val
if typeof(t, (Queue, Array, Tuple, Union)):
change = [
create_data_change(subt, v, prev_v)
for subt, v, prev_v in split_coded_change(t, val, prev_val)
]
# return {'isValueComplex': True, 'isDataChanged': is_changed, 'value': change}
return (1, int(is_changed), change)
else:
if isinstance(val, (int, float)):
val = int(val)
return (0, int(is_changed), val)
# return {'isValueComplex': False, 'isDataChanged': is_changed, 'value': val}
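# Illustrative sketch (added for clarity; not part of the original module). For a plain leaf
# type the change record is the flat triple (is_value_complex, is_data_changed, value).
# Assumes pygears.typing.Uint is importable in this environment; defined but never called.
def _demo_create_data_change():
    from pygears.typing import Uint
    return create_data_change(Uint[8], 5, 3)   # -> (0, 1, 5)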
class VcdToJson:
def __init__(self):
self.json_vcd = {}
self.diff = {}
self.state = {}
self.value = {}
def create_change(self, timestep, state, state_change, t, val, prev_val):
# elem = {
# 'cycle': timestep,
# 'state': int(state),
# 'isStateChanged': state_change,
# }
elem = [timestep, int(state), int(state_change)]
if val is not None:
elem.append(create_data_change(t, val, prev_val))
# elem['data'] = create_data_change(t, val, prev_val)
import json
# print(json.dumps(elem, separators=(',', ':')))
return json.dumps(elem, separators=(',', ':'))
def after_timestep(self, timestep):
for p, d in self.diff.items():
changes = self.json_vcd[p.name]
data_change = False
new_state = state = self.state[p]
prev_val = self.value[p]
new_val = d['d']
# data_change = new_val is not None and (prev_val or prev_val != new_val)
# if data_change:
# new_val = p.dtype.decode(d['d'])
# else:
# new_val = None
if state == ChannelState.Invalid:
if d['v'] and d['r']:
new_state = ChannelState.Ready
data_change = True
elif d['v']:
new_state = ChannelState.NotReady
data_change = True
elif d['r']:
new_state = ChannelState.Awaiting
elif state == ChannelState.Ready:
if d['r'] == 0 and d['v'] == 0:
new_state = ChannelState.Invalid
elif d['r'] == 0:
new_state = ChannelState.NotReady
data_change = prev_val != new_val
elif d['v'] == 0:
new_state = ChannelState.Awaiting
else:
data_change = prev_val != new_val
elif state == ChannelState.NotReady:
if d['r']:
new_state = ChannelState.Ready
elif state == ChannelState.Awaiting:
if d['r'] == 0 and d['v']:
new_state = ChannelState.NotReady
data_change = prev_val != new_val
elif d['v']:
new_state = ChannelState.Ready
data_change = prev_val != new_val
new_state_json = new_state
if new_state_json == ChannelState.Awaiting:
new_state_json = ChannelState.Invalid
state_json = state
if state_json == ChannelState.Awaiting:
state_json = ChannelState.Invalid
if new_state_json != state_json or data_change or timestep == 0:
cycle_change = self.create_change(
timestep,
new_state_json,
state_change=(new_state_json != state_json),
t=p.dtype,
val=(None if new_state_json == ChannelState.Invalid else new_val),
prev_val=prev_val)
if cycle_change is not None:
changes.append(cycle_change)
if data_change:
self.value[p] = new_val
self.state[p] = new_state
# self.diff.clear()
def follow(fn, finish_event=None, sleep_sec=0.1):
""" Yield each line from a file as they are written.
`sleep_sec` is the time to sleep after empty reads. """
line = ''
with open(fn) as f:
while True:
tmp = f.readline()
if tmp:
line += tmp
if line.endswith("\n"):
yield line
line = ''
else:
if finish_event is None or finish_event.is_set():
return
time.sleep(sleep_sec)
# def follow(fn, finish_event, sleep_sec=0.5):
# import time
# import subprocess
# import select
# f = subprocess.Popen(['tail', '-F', fn], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# p = select.poll()
# p.register(f.stdout)
# while True:
# if p.poll(sleep_sec):
# yield f.stdout.readline().decode()
# elif finish_event.is_set():
# return
# else:
# # print(f'Sleep')
# time.sleep(sleep_sec)
def vcd_to_json_worker(entries, wire_map: dict, vcd_conv, t):
for identifier_code, value in entries:
if identifier_code not in wire_map:
continue
for port, wire_name in wire_map[identifier_code]:
# print(f'"{identifier_code}": {port.name}_{wire_name} change @ {t}')
if port not in vcd_conv.diff:
vcd_conv.diff[port] = {'r': None, 'v': None, 'd': None}
if wire_name == 'valid':
vcd_conv.diff[port]['v'] = value
elif wire_name == 'ready':
vcd_conv.diff[port]['r'] = value
elif wire_name == 'data' and value is not None:
vcd_conv.diff[port]['d'] = value
vcd_conv.after_timestep(t)
def vcd_to_json(top, file_iter):
wire_map = {}
traced_channels = set()
vcd_conv = None
skip_scope = 0
bc_parent = None
t = -1
hier = None
worker_data = []
for line in file_iter:
# for line in f:
line = line.strip()
if line == '':
continue
line_head = line[0]
if line_head == '#':
next_t = int(line[1:])
if next_t % 10 != 0:
continue
next_t //= 10
if t == -1:
vcd_conv = VcdToJson()
for identifier_code in wire_map:
for p, _ in wire_map[identifier_code]:
vcd_conv.state[p] = ChannelState.Invalid
vcd_conv.value[p] = None
vcd_conv.json_vcd[p.name] = []
if t >= 0:
vcd_to_json_worker(worker_data, wire_map, vcd_conv, t)
worker_data.clear()
t = next_t
elif line_head in VALUE_CHANGE:
if line_head in VECTOR_VALUE_CHANGE:
value, identifier_code = line[1:].split()
elif line_head in VALUE:
value = line[0]
identifier_code = line[1:]
if value[0] in INVALID_VALUE:
value = None
elif value[0] not in ('b', 'B'):
value = int(value, 2)
else:
value = int(value[1:])
if identifier_code in wire_map:
worker_data.append((identifier_code, value))
elif '$enddefinitions' in line:
pass
elif '$scope' in line:
if skip_scope:
skip_scope += 1
continue
segs = line.split()
# Name of the TOP is ' '
if len(segs) == 3:
scope_name = ''
else:
scope_name = segs[2]
child = None
if scope_name.startswith('_') and scope_name.endswith('_spy'):
scope_name = scope_name[1:-4]
if hier is None:
top_maybe_sv_mod = reg['hdlgen/map'].get(top, top)
if scope_name == top_maybe_sv_mod.basename or scope_name == getattr(top_maybe_sv_mod, 'inst_name'):
hier = top
continue
if scope_name.startswith('bc_'):
skip_scope = 1
continue
elif '_bc_' in scope_name:
scope_name, _, bc_dout_id = scope_name.rpartition('_')
scope_name = scope_name[1:-3]
for i in hier.local_intfs:
svmod = reg['hdlgen/map'].get(i, i)
if svmod.basename == scope_name:
child = i.consumers[int(bc_dout_id)]
bc_parent = hier
break
else:
breakpoint()
else:
child = None
for c in hier.child + hier.local_intfs:
svmod = reg['hdlgen/map'].get(c, None)
if svmod is None:
continue
if isinstance(svmod, SVIntfGen):
if svmod.basename == scope_name:
child = c
break
elif svmod.inst_name == scope_name:
child = c
break
if child is None:
child = find(f'{hier.name}.{scope_name}')
if child is not None and hier.parent is not None and not node_hierarchical(
hier.parent):
skip_scope = 1
continue
if not isinstance(child, Port):
child = None
if child is None:
child = find(f'{hier.name}/{scope_name}')
if child is None:
skip_scope = 1
continue
hier = child
elif '$upscope' in line:
if skip_scope:
skip_scope -= 1
continue
if hier is top:
hier = None
elif bc_parent is not None:
hier = bc_parent
bc_parent = None
elif isinstance(hier, (Gear, Intf)):
if hier is not top:
hier = hier.parent
elif hier is not None:
hier = hier.gear
elif '$var' in line:
if hier is top or skip_scope:
continue
if not isinstance(hier, (Port, Intf)):
continue
# Output ports are not monitored for non-hierarchical gears, since
# the output channel will be picked up by the consumer.
# UNLESS:
# - It is a hierarchical gear, which means there is a part of
# the interface inside the hierarchical gear
# - It is an output port which is connected to a broadcast
if (isinstance(hier, Port) and not isinstance(hier, InPort)
and not node_hierarchical(hier.gear) and not len(hier.consumer.consumers) > 1):
continue
ls = line.split()
identifier_code = ls[3]
name = ls[4]
# name = ''.join(ls[4:-1])
# print(f'Var "{identifier_code}": {hier.name}_{name}')
if identifier_code not in wire_map:
wire_map[identifier_code] = []
if isinstance(hier, OutPort) and len(hier.consumer.consumers) > 1:
channel = hier.consumer
else:
channel = hier
if (channel.name, name) not in traced_channels:
wire_map[identifier_code].append((channel, name))
traced_channels.add((channel.name, name))
elif '$timescale' in line:
continue
if vcd_conv is None:
return None
return vcd_conv.json_vcd
def vcd_to_json_task(vcd_fn, finish_event, ret_pipe, top):
import os
import time
while not os.path.exists(vcd_fn):
time.sleep(0.1)
time.sleep(0.1)
json_vcd = vcd_to_json(top, follow(vcd_fn, finish_event))
ret_pipe.send(json_vcd)
def dtype_tree(dtype):
from pygears.typing import Queue, Array, Tuple, Union
if issubclass(dtype, Array):
return {'name': repr(dtype), 'subtypes': [dtype_tree(t) for t in dtype]}
elif issubclass(dtype, (Queue, Array, Tuple, Union)):
return {
'name': repr(dtype),
'subtypes': [dtype_tree(t) for t in dtype],
'keys': dtype.fields
}
else:
return {'name': repr(dtype)}
def node_hierarchical(node):
return node.hierarchical and node.meta_kwds.get('hdl', {}).get('hierarchical', True)
def dump_json_graph(top):
nodes = {}
ports = {}
connections = {}
class NodeYielder(HierYielderBase):
def Gear(self, node):
yield node
return not node_hierarchical(node)
for node in NodeYielder().visit(find(top)):
in_ports = []
for p in node.in_ports:
ports[p.name] = {
'basename': p.basename,
'name': p.name,
'dtype': repr(p.dtype),
'index': p.index
}
in_ports.append(p.name)
out_ports = []
for p in node.out_ports:
ports[p.name] = {
'basename': p.basename,
'name': p.name,
'dtype': repr(p.dtype),
'index': p.index
}
out_ports.append(p.name)
node_json = {
'basename': node.basename,
'name': node.name,
'definition': 'None' if node.definition is None else
f'{node.definition.func.__module__}.{node.definition.func.__name__}',
'in_ports': in_ports,
'out_ports': out_ports
}
nodes[node.name] = node_json
for node in NodeYielder().visit(find(top)):
nodes[node.name]['child'] = []
if node_hierarchical(node): # node.hierarchical:
for c in node.child:
nodes[node.name]['child'].append(c.name)
for node in NodeYielder().visit(find(top)):
for p in node.in_ports + node.out_ports:
if p.consumer is None or any(isinstance(c, HDLConsumer) for c in p.consumer.consumers):
continue
if isinstance(p, InPort) and not node_hierarchical(node):
continue
i = p.consumer
connections[i.name] = {
'producer': i.producer.name,
'dtype': repr(p.dtype),
'dtype_tree': dtype_tree(p.dtype),
'name': i.name
}
consumers = [pc.name for pc in i.consumers if not isinstance(pc, HDLConsumer)]
if len(consumers) <= 1:
connections[i.name]['consumers'] = consumers
for c in consumers:
ports[c]['producer'] = i.name
else:
parent = node if node_hierarchical(node) and isinstance(p, InPort) else node.parent
bc_basename = f'{i.basename}_bc'
bc_name = f'{parent.name}/{bc_basename}'
inp_name = f'{bc_name}.din'
ports[f'{bc_name}.din'] = {
'basename': 'din',
'name': inp_name,
'producer': i.name,
'dtype': repr(p.dtype),
'index': 0
}
in_ports = [inp_name]
connections[i.name]['consumers'] = [inp_name]
out_ports = []
for c in range(len(consumers)):
outp_name = f'{bc_name}.dout{c}'
ports[outp_name] = {
'basename': f'dout{c}',
'name': outp_name,
'consumer': outp_name,
'dtype': repr(p.dtype),
'index': c
}
connections[outp_name] = {
'producer': outp_name,
'dtype': repr(p.dtype),
'dtype_tree': dtype_tree(p.dtype),
'name': outp_name,
'consumers': [consumers[c]]
}
ports[consumers[c]]['producer'] = outp_name
out_ports.append(outp_name)
node_json = {
'basename': bc_basename,
'name': bc_name,
'definition': 'broadcast',
'in_ports': in_ports,
'out_ports': out_ports,
'child': []
}
nodes[parent.name]['child'].append(bc_name)
nodes[bc_name] = node_json
# if not all(isinstance(p, HDLConsumer) for p in i.consumers):
ports[i.producer.name]['consumer'] = i.name
return {'gears': nodes, 'ports': ports, 'connections': connections}
class WebSim(SimExtend):
@inject
def __init__(self, trace_fn='pygears.json', outdir=Inject('results-dir'), multiprocess=True):
super().__init__()
self.outdir = outdir
self.trace_fn = os.path.abspath(os.path.join(self.outdir, trace_fn))
self.multiprocess = multiprocess
self.cosim_modules = []
self.p = []
# atexit.register(self.finish)
self.vcd_fn = os.path.abspath(os.path.join(self.outdir, 'pygears.vcd'))
self.finished = False
def register_vcd_worker(self, vcd_fn, top):
qin, qout = multiprocessing.Pipe(duplex=False)
self.qin.append(qin)
p = multiprocessing.Process(target=vcd_to_json_task,
args=(vcd_fn, self.finish_event, qout, top))
self.p.append(p)
def before_run(self, sim):
if not self.multiprocess:
for m in find_cosim_modules():
self.cosim_modules.append((m.gear, m.trace_fn))
return
self.finish_event = multiprocessing.Event()
self.qin = []
self.p = []
for m in find_cosim_modules():
if not m.trace_fn:
continue
self.register_vcd_worker(m.trace_fn, top=m.gear)
self.register_vcd_worker(self.vcd_fn, find('/'))
for p in self.p:
p.start()
def sim_vcd_to_json(self):
graph = dump_json_graph('/')
if self.multiprocess:
self.finish_event.set()
json_vcds = [qin.recv() for qin in self.qin]
for p in self.p:
p.join()
else:
json_vcds = []
for top, trace_fn in self.cosim_modules:
json_vcds.append(vcd_to_json(top, follow(trace_fn)))
json_vcds.append(vcd_to_json(find('/'), follow(self.vcd_fn)))
visited_channels = set()
changes = []
for json_vcd in json_vcds:
if json_vcd is None:
continue
for p_name in json_vcd:
p = find(p_name)
intf_name = p_name
port_name = None if isinstance(p, Intf) else p.producer.name
if ((isinstance(p, InPort) or
(isinstance(p, OutPort) and node_hierarchical(p.gear)))
and len(p.producer.consumers) > 1):
for i in range(len(p.producer.consumers)):
if p.producer.consumers[i] is p:
break
if isinstance(p, InPort) and node_hierarchical(p.gear):
intf_name = p_name
bc_name = f'{p.producer.parent.name}/{p.producer.basename}_bc'
port_name = f'{bc_name}.dout{i}'
if isinstance(p, InPort) and (p.consumer is None or any(
isinstance(c, HDLConsumer) for c in p.consumer.consumers)):
intf_name = None
for channel_name in [intf_name, port_name]:
if channel_name is not None and channel_name not in visited_channels:
changes.append({'channelName': channel_name, 'changes': json_vcd[p_name]})
visited_channels.add(channel_name)
return {
'graphInfo': graph,
'simulationChanges': {
'startCycle': 0,
'endCycle': timestep(),
'channelChanges': changes
}
}
def finish(self):
if not self.finished:
err = None
try:
json_out = self.sim_vcd_to_json()
import json
json.dump(json_out, open(self.trace_fn, 'w'), separators=(',', ':'))
# json.dump(json_out, open(self.trace_fn, 'w'))
# json.dump(json_out, open(self.trace_fn, 'w'), indent=4)
except Exception as e:
for p in self.p:
p.terminate()
err = e
if err is not None:
raise err
self.finished = True
def after_cleanup(self, sim):
self.finish()
def websim_activate(var, val):
if val:
if WebSim not in reg['sim/extens']:
reg['debug/expand_trace_data'] = False
reg['debug/trace_end_cycle_dump'] = False
reg['sim/extens'].append(WebSim)
else:
if WebSim in reg['sim/extens']:
reg['debug/expand_trace_data'] = True
reg['debug/trace_end_cycle_dump'] = True
del reg['sim/extens'][reg['sim/extens'].index(WebSim)]
class WebSimPlugin(HDLPlugin):
@classmethod
def bind(cls):
reg.confdef('debug/webviewer', setter=websim_activate, default=False)
|
b68080aa05458d26ae2820e873b0541c61990888
|
2f5cc34ab90fce50ffca092e9e3acd283144ae48
|
/praatio/utilities/utils.py
|
513bfa821e4c666c3952600f49245923e43eac9a
|
[
"MIT"
] |
permissive
|
timmahrt/praatIO
|
e05859e56bb7f9de1fd776c07aed9dc3c1f989b2
|
0f0544e5a897f05993cbe183836f3773dcd817a8
|
refs/heads/main
| 2023-07-31T17:39:59.469860
| 2023-07-18T14:10:17
| 2023-07-18T14:10:17
| 23,467,063
| 275
| 37
|
MIT
| 2023-07-18T14:10:18
| 2014-08-29T14:45:36
|
Python
|
UTF-8
|
Python
| false
| false
| 15,558
|
py
|
utils.py
|
"""
Various generic utility functions
"""
import os
import subprocess
import itertools
import wave
from pkg_resources import resource_filename
from typing_extensions import Literal
from typing import Any, Iterator, List, Tuple, NoReturn, Type, Optional
from praatio.utilities import errors
from praatio.utilities import constants
Interval = constants.Interval
# Get the path to the praat scripts bundled with the praatio package
scriptsPath = resource_filename(
"praatio",
"praatScripts",
)
def find(list, value, reverse) -> Optional[int]:
"""Returns the first/last index of an item in a list"""
if value not in list:
return None
if reverse:
index = len(list) - list[::-1].index(value) - 1
else:
index = list.index(value)
return index
def reportNoop(_exception: Type[BaseException], _text: str) -> None:
pass
def reportException(exception: Type[BaseException], text: str) -> NoReturn:
raise exception(text)
def reportWarning(_exception: Type[BaseException], text: str) -> None:
print(text)
def getErrorReporter(reportingMode: Literal["silence", "warning", "error"]):
modeToFunc = {
constants.ErrorReportingMode.SILENCE: reportNoop,
constants.ErrorReportingMode.WARNING: reportWarning,
constants.ErrorReportingMode.ERROR: reportException,
}
return modeToFunc[reportingMode]
def checkIsUndershoot(time: float, referenceTime: float, errorReporter) -> bool:
if time < referenceTime:
errorReporter(
errors.OutOfBounds,
f"'{time}' occurs before minimum allowed time '{referenceTime}'",
)
return True
else:
return False
def checkIsOvershoot(time: float, referenceTime: float, errorReporter) -> bool:
if time > referenceTime:
errorReporter(
errors.OutOfBounds,
f"'{time}' occurs after maximum allowed time '{referenceTime}'",
)
return True
else:
return False
def validateOption(variableName, value, optionClass):
if value not in optionClass.validOptions:
raise errors.WrongOption(variableName, value, optionClass.validOptions)
def intervalOverlapCheck(
interval: Interval,
cmprInterval: Interval,
percentThreshold: float = 0,
timeThreshold: float = 0,
boundaryInclusive: bool = False,
) -> bool:
"""Checks whether two intervals overlap
Args:
interval:
cmprInterval:
percentThreshold: if percentThreshold is greater than 0, then
if the intervals overlap, they must overlap by at least this threshold
(0.2 would mean 20% overlap considering both intervals)
(e.g. [0, 6] and [3, 8] overlap by 3 / 8 = 37.5% of their combined span. If
percentThreshold is set higher than 0.375, the intervals will be considered to not overlap.)
timeThreshold: if greater than 0, then if the intervals overlap,
they must overlap by at least this threshold
boundaryInclusive: if true, then two intervals are considered to
overlap if they share a boundary
Returns:
bool:
"""
# TODO: move to Interval class?
startTime, endTime = interval[:2]
cmprStartTime, cmprEndTime = cmprInterval[:2]
overlapTime = max(0, min(endTime, cmprEndTime) - max(startTime, cmprStartTime))
overlapFlag = overlapTime > 0
# Do they share a boundary? Only need to check if one boundary ends
# when another begins (because otherwise, they overlap in other ways)
boundaryOverlapFlag = False
if boundaryInclusive:
boundaryOverlapFlag = startTime == cmprEndTime or endTime == cmprStartTime
# Is the overlap over a certain percent?
percentOverlapFlag = False
if percentThreshold > 0 and overlapFlag:
totalTime = max(endTime, cmprEndTime) - min(startTime, cmprStartTime)
percentOverlap = overlapTime / float(totalTime)
percentOverlapFlag = percentOverlap >= percentThreshold
overlapFlag = percentOverlapFlag
# Is the overlap more than a certain threshold?
timeOverlapFlag = False
if timeThreshold > 0 and overlapFlag:
timeOverlapFlag = overlapTime >= timeThreshold
overlapFlag = timeOverlapFlag
overlapFlag = (
overlapFlag or boundaryOverlapFlag or percentOverlapFlag or timeOverlapFlag
)
return overlapFlag
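# Illustrative sketch (added for clarity; not part of praatio). For [0, 6] vs [3, 8] the
# overlap is 3 out of a combined span of 8, i.e. 0.375. Defined but never called.
def _demoIntervalOverlapCheck():
    a = Interval(0, 6, "a")
    b = Interval(3, 8, "b")
    assert intervalOverlapCheck(a, b)
    assert intervalOverlapCheck(a, b, percentThreshold=0.3)
    assert not intervalOverlapCheck(a, b, percentThreshold=0.5)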
def getIntervalsInInterval(
start: float,
end: float,
intervals: List[Interval],
mode: Literal["strict", "lax", "truncated"],
) -> List[Interval]:
"""Gets all intervals that exist between /start/ and /end/
Args:
start: the target interval start time
end: the target interval stop time
intervals: the list of intervals to check
mode: Determines judgement criteria
- 'strict', only intervals wholly contained by the target
interval will be kept
- 'lax', partially contained intervals will be kept
- 'truncated', partially contained intervals will be
truncated to fit within the crop region.
Returns:
The list of intervals that overlap with the target interval
"""
# TODO: move to Interval class?
validateOption("mode", mode, constants.CropCollision)
containedIntervals = []
for interval in intervals:
matchedEntry = None
# Don't need to investigate if the current interval is
# before start or after end
if interval.end <= start or interval.start >= end:
continue
# Determine if the current interval is wholly contained
# within the target interval
if interval.start >= start and interval.end <= end:
matchedEntry = interval
# If the current interval is only partially contained within the
# target interval AND inclusion is 'lax', include it anyways
elif mode == constants.CropCollision.LAX and (
interval.start >= start or interval.end <= end
):
matchedEntry = interval
# The current interval straddles the end of the target interval
elif interval.start >= start and interval.end > end:
if mode == constants.CropCollision.TRUNCATED:
matchedEntry = Interval(interval.start, end, interval.label)
# The current interval straddles the start of the target interval
elif interval.start < start and interval.end <= end:
if mode == constants.CropCollision.TRUNCATED:
matchedEntry = Interval(start, interval.end, interval.label)
# The current interval contains the target interval completely
elif interval.start <= start and interval.end >= end:
if mode == constants.CropCollision.LAX:
matchedEntry = interval
elif mode == constants.CropCollision.TRUNCATED:
matchedEntry = Interval(start, end, interval.label)
if matchedEntry is not None:
containedIntervals.append(matchedEntry)
return containedIntervals
def escapeQuotes(text: str) -> str:
return text.replace('"', '""')
def strToIntOrFloat(inputStr: str) -> float:
return float(inputStr) if "." in inputStr else int(inputStr)
def getValueAtTime(
timestamp: float,
sortedDataTupleList: List[Tuple[Any, ...]],
fuzzyMatching: bool = False,
startI: int = 0,
) -> Tuple[Tuple[Any, ...], int]:
"""Get the value in the data list (sorted by time) that occurs at this point
If fuzzyMatching is True, if there is not a value
at the requested timestamp, the nearest feature value will be taken.
The procedure assumes that all data is ordered in time.
sortedDataTupleList should be in the form
[(t1, v1a, v1b, ..), (t2, v2a, v2b, ..), ..]
The procedure makes a single forward pass through sortedDataTupleList.
If the data is not sequentially ordered, the incorrect response
will be returned.
For efficiency purposes, it takes a starting index and returns the ending
index.
"""
# TODO: move to Point class?
i = startI
bestRow: Tuple[Any, ...] = ()
# Only find exact timestamp matches
if fuzzyMatching is False:
while True:
try:
currRow = sortedDataTupleList[i]
except IndexError:
break
currTime = currRow[0]
if currTime >= timestamp:
if timestamp == currTime:
bestRow = currRow
break
i += 1
# Find the closest timestamp
else:
bestTime = sortedDataTupleList[i][0]
bestRow = sortedDataTupleList[i]
while True:
try:
dataTuple = sortedDataTupleList[i]
except IndexError:
i -= 1
break # Last known value is the closest one
currTime = dataTuple[0]
currRow = dataTuple
currDiff = abs(currTime - timestamp)
bestDiff = abs(bestTime - timestamp)
if currDiff < bestDiff: # We're closer to the target val
bestTime = currTime
bestRow = currRow
if currDiff == 0:
break # Can't do better than a perfect match
elif currDiff > bestDiff:
i -= 1
break # We've past the best value.
i += 1
retRow = bestRow
return retRow, i
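# Illustrative sketch (added for clarity; not part of praatio): with fuzzyMatching the
# nearest timestamp wins, and the returned index can seed the next lookup. Never called.
def _demoGetValueAtTime():
    data = [(0.0, "a"), (1.0, "b"), (2.0, "c")]
    row, i = getValueAtTime(1.1, data, fuzzyMatching=True)
    assert row == (1.0, "b")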
def getValuesInInterval(dataTupleList: List, start: float, end: float) -> List:
"""Gets the values that exist within an interval
The function assumes that the data is formatted as
[(t1, v1a, v1b, ...), (t2, v2a, v2b, ...)]
"""
# TODO: move to Interval class?
intervalDataList = []
for dataTuple in dataTupleList:
time = dataTuple[0]
if start <= time and end >= time:
intervalDataList.append(dataTuple)
return intervalDataList
def sign(x: float) -> int:
"""Returns 1 if x is positive, 0 if x is 0, and -1 otherwise"""
retVal = 0
if x > 0:
retVal = 1
elif x < 0:
retVal = -1
return retVal
def invertIntervalList(
inputList: List[Tuple[float, float]], minValue: float = None, maxValue: float = None
) -> List[Tuple[float, float]]:
"""Inverts the segments of a list of intervals
e.g.
[(0,1), (4,5), (7,10)] -> [(1,4), (5,7)]
[(0.5, 1.2), (3.4, 5.0)] -> [(0.0, 0.5), (1.2, 3.4)]
"""
if any([interval[0] >= interval[1] for interval in inputList]):
raise errors.ArgumentError("Interval start occured before interval end")
inputList = sorted(inputList)
# Special case -- empty lists
invList: List[Tuple[float, float]]
if len(inputList) == 0 and minValue is not None and maxValue is not None:
invList = [
(minValue, maxValue),
]
else:
# Insert in a garbage head and tail value for the purpose
# of inverting, if the range does not start and end at the
# smallest and largest values
if minValue is not None and inputList[0][0] > minValue:
inputList.insert(0, (-1, minValue))
if maxValue is not None and inputList[-1][1] < maxValue:
inputList.append((maxValue, maxValue + 1))
invList = [
(inputList[i][1], inputList[i + 1][0]) for i in range(0, len(inputList) - 1)
]
# If two intervals in the input share a boundary, we'll get invalid intervals in the output
# eg invertIntervalList([(0, 1), (1, 2)]) -> [(1, 1)]
invList = [interval for interval in invList if interval[0] != interval[1]]
return invList
def makeDir(path: str) -> None:
"""Create a new directory
Unlike os.mkdir, it does not throw an exception if the directory already exists
"""
if not os.path.exists(path):
os.mkdir(path)
def findAll(txt: str, subStr: str) -> List[int]:
"""Find the starting indicies of all instances of subStr in txt"""
indexList = []
index = 0
while True:
try:
index = txt.index(subStr, index)
except ValueError:
break
indexList.append(int(index))
index += 1
return indexList
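# Illustrative sketch (added for clarity; not part of praatio): findAll also reports
# overlapping matches. Defined but never called.
def _demoFindAll():
    assert findAll("abcabc", "abc") == [0, 3]
    assert findAll("aaa", "aa") == [0, 1]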
def runPraatScript(
praatEXE: str, scriptFN: str, argList: List[Any], cwd: str = None
) -> None:
# Popen gives a not-very-transparent error
if not os.path.exists(praatEXE):
raise errors.FileNotFound(praatEXE)
if not os.path.exists(scriptFN):
raise errors.FileNotFound(scriptFN)
argList = ["%s" % arg for arg in argList]
cmdList = [praatEXE, "--run", scriptFN] + argList
myProcess = subprocess.Popen(cmdList, cwd=cwd)
if myProcess.wait():
raise errors.PraatExecutionFailed(cmdList)
def safeZip(listOfLists: List[list], enforceLength: bool) -> Iterator[Any]:
"""A safe version of python's zip()
If two sublists are of different sizes, python's zip will truncate
the output to be the smaller of the two.
safeZip throws an exception if the size of any sublist is different
from the rest.
"""
if enforceLength is True:
length = len(listOfLists[0])
if not all([length == len(subList) for subList in listOfLists]):
raise errors.SafeZipException("Lists to zip have different sizes.")
return itertools.zip_longest(*listOfLists)
def getWavDuration(wavFN: str) -> float:
"For internal use. See praatio.audio.QueryWav() for general use."
audiofile = wave.open(wavFN, "r")
params = audiofile.getparams()
framerate = params[2]
nframes = params[3]
duration = float(nframes) / framerate
return duration
def chooseClosestTime(
targetTime: float, candidateA: Optional[float], candidateB: Optional[float]
) -> float:
"""Chooses the closest time between two options that straddle the target time
Args:
targetTime: the time to compare against
candidateA: the first candidate
candidateB: the second candidate
Returns:
the closer of the two options to the target time
Raises:
ArgumentError: When no left or right candidate is provided
"""
closestTime: float
if candidateA is None and candidateB is None:
raise (errors.ArgumentError("Must provide at"))
elif candidateA is None and candidateB is not None:
closestTime = candidateB
elif candidateB is None and candidateA is not None:
closestTime = candidateA
elif candidateB is not None and candidateA is not None:
aDiff = abs(candidateA - targetTime)
bDiff = abs(candidateB - targetTime)
if aDiff <= bDiff:
closestTime = candidateA
else:
closestTime = candidateB
return closestTime
def getInterval(
startTime: float, duration: float, max: float, reverse: bool
) -> Tuple[float, float]:
"""returns an interval before or after some start time
The returned timestamps will be between 0 and max
Args:
startTime: the time to get the interval from
duration: duration of the interval
max: the maximum allowed time
reverse: if True, the interval ends at startTime; otherwise it starts at startTime
Returns:
the start and end time of an interval
"""
if reverse is True:
endTime = startTime
startTime -= duration
else:
endTime = startTime + duration
# Don't read over the edges
if startTime < 0:
startTime = 0
elif endTime > max:
endTime = max
return (startTime, endTime)
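# Illustrative sketch (added for clarity; not part of praatio): a 2-second window read
# backwards from t=1.5 is clipped at 0. Defined but never called.
def _demoGetInterval():
    assert getInterval(1.5, 2.0, 10.0, reverse=True) == (0, 1.5)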
|
ede90c97f39e7c89afd0c4454b4b1451b1253413
|
231f2aa788dbcdf064b45a1baf3f1ca17517b2b9
|
/caer/coreten/tensor.py
|
dc8a2283d4ab6a69040c0cba0730804da0dac75e
|
[
"MIT"
] |
permissive
|
jasmcaus/caer
|
5184d34a39b2074d5500a3eef6de2703e7b9502d
|
3708bce3d6b1568b844ac78c55402862c548de61
|
refs/heads/master
| 2023-08-11T06:04:24.999595
| 2021-10-13T21:05:33
| 2021-10-13T21:05:33
| 285,644,536
| 806
| 159
|
MIT
| 2023-04-01T08:26:46
| 2020-08-06T18:36:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
tensor.py
|
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++, Cuda
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
# pylint:disable=unused-argument
import numpy as np
from ..annotations import Union, Tuple, List
from ._tensor_str import _str
from ._tensor_base import _TensorBase
# We use np.ndarray as a super class, because ``ndarray.view()`` expects an ndarray sub-class
# We also derive useful class methods from ``_TensorBase`` which serves as the Tensor's Base
class Tensor(_TensorBase, np.ndarray):
# This is required to get the type(Tensor) to be 'caer.Tensor'.
# Without this, type(Tensor) is 'caer.tensor.Tensor' which is not ideal.
# Alternatively, we may shove this class to __init__.py, but this would, again, not be ideal
__module__ = "caer"
# def __new__(cls, x, dtype=None):
def __new__(cls, x: Union[Tuple, List, np.ndarray], cspace: str, dtype=None):
if not isinstance(x, (tuple, list, np.ndarray)):
raise TypeError("Data needs to be (ideally) a list")
if not isinstance(cspace, str) and cspace is not None:
raise TypeError(f"`cspace` needs to be of type <string>, not {type(cspace)}")
obj = np.asarray(x, dtype=dtype).view(cls)
obj.dtype = obj.dtype
return obj
def __init__(self, x, cspace, dtype=None):
super().__init__() # gets attributes from '_TensorBase'
self.x = x
if cspace is None:
self.cspace = "null"
else:
if cspace in ("rgb", "bgr", "gray", "hsv", "hls", "lab", "yuv", "luv"):
self.cspace = cspace
else:
raise ValueError("The `cspace` attribute needs to be either rgb/bgr/gray/hsv/hls/lab/yuv/luv")
def __repr__(self):
return _str(self)
def __str__(self):
return self.__repr__()
def is_tensor(x: Tensor) -> bool:
r"""
Returns True if `x` is a Caer tensor.
Note that this function is simply doing ``isinstance(x, Tensor)``. Using the ``isinstance`` check is better for typechecking with mypy, and more explicit - so it's recommended to use that instead of ``is_tensor``.
For now, Caer Tensors are simply customized Numpy arrays.
Args:
x (Object): Object to test
"""
return isinstance(x, Tensor) or "caer.Tensor" in str(type(x))
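# Illustrative sketch (added for clarity; not part of caer): constructing a Tensor from a
# nested list and distinguishing it from a plain ndarray. Defined but never called.
def _demo_is_tensor():
    t = Tensor([[1, 2], [3, 4]], cspace="rgb")
    assert is_tensor(t)
    assert not is_tensor(np.array([1, 2]))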
|
ab648ad6c4f3e1e66be64d1990a9a3cdf502119a
|
744c3b66611b08782fcdd9d66261c4d55b00d426
|
/examples/pybullet/gym/pybullet_envs/minitaur/robots/safety/motor_state_validator.py
|
3ccf34c02c06dde9aa3b3654595bc6e2fe8303d6
|
[
"Zlib"
] |
permissive
|
erwincoumans/bullet3
|
4ff9e0aa64b641c65b57b26f415dd69dbfb12256
|
6d181d78a5c7be8714c74055cddcf63d5ccef70a
|
refs/heads/master
| 2023-03-10T14:58:18.072562
| 2023-02-24T18:32:53
| 2023-02-24T18:32:53
| 31,621,748
| 103
| 29
|
NOASSERTION
| 2019-02-25T17:31:00
| 2015-03-03T21:15:54
|
C++
|
UTF-8
|
Python
| false
| false
| 5,217
|
py
|
motor_state_validator.py
|
"""Software safety layer for robot control.
Validates the motor states received from the motor encoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing
from pybullet_envs.minitaur.robots.safety import data_types
from pybullet_envs.minitaur.robots.safety import utilities
from pybullet_envs.minitaur.robots.safety.python import moving_window_filter
# The default internal buffer size for the MotorStateValidator.
_DEQUE_SIZE = 200
class MotorStateValidator(object):
"""A safety guard to check motor states.
Monitors the status of the motor and detects anomalies in the
readings. For example, the class will throw safety errors if the motor
velocity is too large. Currently we support checking of motor angle, velocity,
gain, torque, as well as the timestamp interval.
Attributes:
last_state: The last received motor state.
"""
def __init__(
self,
motor_id: typing.Any,
position_bound: data_types.Bound,
position_gain_bound: data_types.Bound,
velocity_bound: data_types.Bound,
velocity_gain_bound: data_types.Bound,
torque_bound: data_types.Bound,
timestamp_delta_bound: data_types.Bound,
average_abs_velocity_bound: data_types.Bound,
average_abs_power_bound: data_types.Bound,
state_buffer_size: int = _DEQUE_SIZE,
):
"""Initializes the class.
Args:
motor_id: Unique ID for the motor.
position_bound: The lower/upper bound of the motor angle.
position_gain_bound: The lower/upper bound of the motor position gain for
PD control.
velocity_bound: The lower/upper bound of the motor speed.
velocity_gain_bound: The lower/upper bound of the motor velocity gain for
PD control.
torque_bound: The lower/upper bound of the measured motor torque.
timestamp_delta_bound: The range of timestamp difference between two
consecutively received motor states.
average_abs_velocity_bound: The average absolute velocity limit.
average_abs_power_bound: The average absolute mechanical power limit.
state_buffer_size: The buffer size used to calculate the average.
"""
assert state_buffer_size > 1
self.last_state = None
self._motor_id = motor_id
self._position_bound = position_bound
self._position_gain_bound = position_gain_bound
self._velocity_bound = velocity_bound
self._velocity_gain_bound = velocity_gain_bound
self._torque_bound = torque_bound
self._timestamp_delta_bound = timestamp_delta_bound
self._average_abs_velocity_bound = average_abs_velocity_bound
self._average_abs_power_bound = average_abs_power_bound
# For velocity/power, we use a filter to compute their averages
# over a small period. This is to avoid the noisy readings giving false
# positives.
self._abs_velocity_filter = moving_window_filter.MovingWindowFilter(
state_buffer_size)
self._abs_power_filter = moving_window_filter.MovingWindowFilter(
state_buffer_size)
def on_state(self, new_state: data_types.MotorState):
"""Adds a new motor state and validates it.
Will validate both the instantaneous state and the statistical
averages.
Args:
new_state: A new state from the motor encoder.
Raises:
safety_error.OutOfBoundError: When any of the motor readings (e.g.
position, torque) is out of bound.
"""
# We first validate the new state.
motor_str = "motor {} ".format(self._motor_id)
utilities.assert_in_bound(motor_str + "position", new_state.position,
self._position_bound)
utilities.assert_in_bound(motor_str + "velocity", new_state.velocity,
self._velocity_bound)
utilities.assert_in_bound(motor_str + "position gain",
new_state.position_gain,
self._position_gain_bound)
utilities.assert_in_bound(motor_str + "velocity gain",
new_state.velocity_gain,
self._velocity_gain_bound)
utilities.assert_in_bound(motor_str + "torque", new_state.torque,
self._torque_bound)
if not self.last_state:
self.last_state = new_state
return
last_state = self.last_state
# Check if the time interval between two received states are large.
delta_time = new_state.timestamp - last_state.timestamp
utilities.assert_in_bound(motor_str + "timestamp", delta_time,
self._timestamp_delta_bound)
average_abs_velocity = self._abs_velocity_filter.CalculateAverage(
abs(new_state.velocity))
utilities.assert_in_bound(motor_str + "average velocity",
average_abs_velocity,
self._average_abs_velocity_bound)
average_abs_power = self._abs_power_filter.CalculateAverage(
abs(new_state.velocity * new_state.torque))
utilities.assert_in_bound(motor_str + "average power", average_abs_power,
self._average_abs_power_bound)
self.last_state = new_state
|
27ab7c4f7e6e153670fa245120f6b6f51956d88b
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/gluoncv/model_zoo/rcnn/faster_rcnn/rcnn_target.py
|
f0383835dff4ac035d3dc6df0253082fe0708d9e
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294
| 2023-01-19T00:37:33
| 2023-01-19T00:37:33
| 122,896,249
| 6,064
| 1,458
|
Apache-2.0
| 2023-01-19T00:37:35
| 2018-02-26T01:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 9,106
|
py
|
rcnn_target.py
|
"""RCNN Target Generator."""
from __future__ import absolute_import
from mxnet import autograd
from mxnet import gluon
from ....nn.coder import MultiClassEncoder, NormalizedPerClassBoxCenterEncoder
class RCNNTargetSampler(gluon.HybridBlock):
"""A sampler to choose positive/negative samples from RCNN Proposals
Parameters
----------
num_image: int
Number of input images.
num_proposal: int
Number of input proposals.
num_sample : int
Number of samples for RCNN targets.
pos_iou_thresh : float
Proposal whose IOU larger than ``pos_iou_thresh`` is regarded as positive samples.
Proposal whose IOU smaller than ``pos_iou_thresh`` is regarded as negative samples.
pos_ratio : float
``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) are
to be sampled.
max_num_gt : int
Maximum ground-truth number for each example. This is only an upper bound, not
necessarily very precise. However, using a very big number may impact the training speed.
"""
def __init__(self, num_image, num_proposal, num_sample, pos_iou_thresh, pos_ratio, max_num_gt):
super(RCNNTargetSampler, self).__init__()
self._num_image = num_image
self._num_proposal = num_proposal
self._num_sample = num_sample
self._max_pos = int(round(num_sample * pos_ratio))
self._pos_iou_thresh = pos_iou_thresh
self._max_num_gt = max_num_gt
# pylint: disable=arguments-differ
def hybrid_forward(self, F, rois, scores, gt_boxes):
"""Handle B=self._num_image by a for loop.
Parameters
----------
rois: (B, self._num_proposal, 4) encoded in (x1, y1, x2, y2).
scores: (B, self._num_proposal, 1), value range [0, 1] with ignore value -1.
gt_boxes: (B, M, 4) encoded in (x1, y1, x2, y2), invalid box should have area of 0.
Returns
-------
rois: (B, self._num_sample, 4), randomly drawn from proposals
samples: (B, self._num_sample), value +1: positive / 0: ignore / -1: negative.
matches: (B, self._num_sample), value between [0, M)
"""
with autograd.pause():
# collect results into list
new_rois = []
new_samples = []
new_matches = []
for i in range(self._num_image):
roi = F.squeeze(F.slice_axis(rois, axis=0, begin=i, end=i + 1), axis=0)
score = F.squeeze(F.slice_axis(scores, axis=0, begin=i, end=i + 1), axis=0)
gt_box = F.squeeze(F.slice_axis(gt_boxes, axis=0, begin=i, end=i + 1), axis=0)
gt_score = F.sign(F.sum(gt_box, axis=-1, keepdims=True) + 1)
# concat rpn roi with ground truth. mix gt with generated boxes.
all_roi = F.concat(roi, gt_box, dim=0)
all_score = F.concat(score, gt_score, dim=0).squeeze(axis=-1)
# calculate (N, M) ious between (N, 4) anchors and (M, 4) bbox ground-truths
# cannot do batch op, will get (B, N, B, M) ious
ious = F.contrib.box_iou(all_roi, gt_box, format='corner')
# match to argmax iou
ious_max = ious.max(axis=-1)
ious_argmax = ious.argmax(axis=-1)
# init with 2, which are neg samples
mask = F.ones_like(ious_max) * 2
# mark all ignore to 0
mask = F.where(all_score < 0, F.zeros_like(mask), mask)
# mark positive samples with 3
pos_mask = ious_max >= self._pos_iou_thresh
mask = F.where(pos_mask, F.ones_like(mask) * 3, mask)
# shuffle mask
rand = F.random.uniform(0, 1, shape=(self._num_proposal + self._max_num_gt,))
rand = F.slice_like(rand, ious_argmax)
index = F.argsort(rand)
mask = F.take(mask, index)
ious_argmax = F.take(ious_argmax, index)
# sample pos samples
order = F.argsort(mask, is_ascend=False)
topk = F.slice_axis(order, axis=0, begin=0, end=self._max_pos)
topk_indices = F.take(index, topk)
topk_samples = F.take(mask, topk)
topk_matches = F.take(ious_argmax, topk)
# reset output: 3 pos 2 neg 0 ignore -> 1 pos -1 neg 0 ignore
topk_samples = F.where(topk_samples == 3,
F.ones_like(topk_samples), topk_samples)
topk_samples = F.where(topk_samples == 2,
F.ones_like(topk_samples) * -1, topk_samples)
# sample neg samples
index = F.slice_axis(index, axis=0, begin=self._max_pos, end=None)
mask = F.slice_axis(mask, axis=0, begin=self._max_pos, end=None)
ious_argmax = F.slice_axis(ious_argmax, axis=0, begin=self._max_pos, end=None)
# change mask: 4 neg 3 pos 0 ignore
mask = F.where(mask == 2, F.ones_like(mask) * 4, mask)
order = F.argsort(mask, is_ascend=False)
num_neg = self._num_sample - self._max_pos
bottomk = F.slice_axis(order, axis=0, begin=0, end=num_neg)
bottomk_indices = F.take(index, bottomk)
bottomk_samples = F.take(mask, bottomk)
bottomk_matches = F.take(ious_argmax, bottomk)
# reset output: 4 neg 3 pos 0 ignore -> 1 pos -1 neg 0 ignore
bottomk_samples = F.where(bottomk_samples == 3,
F.ones_like(bottomk_samples), bottomk_samples)
bottomk_samples = F.where(bottomk_samples == 4,
F.ones_like(bottomk_samples) * -1, bottomk_samples)
# output
indices = F.concat(topk_indices, bottomk_indices, dim=0)
samples = F.concat(topk_samples, bottomk_samples, dim=0)
matches = F.concat(topk_matches, bottomk_matches, dim=0)
sampled_rois = all_roi.take(indices)
x1, y1, x2, y2 = F.split(sampled_rois, axis=-1, num_outputs=4, squeeze_axis=True)
rois_area = (x2 - x1) * (y2 - y1)
ind = F.argsort(rois_area)
new_rois.append(sampled_rois.take(ind))
new_samples.append(samples.take(ind))
new_matches.append(matches.take(ind))
# stack all samples together
new_rois = F.stack(*new_rois, axis=0)
new_samples = F.stack(*new_samples, axis=0)
new_matches = F.stack(*new_matches, axis=0)
return new_rois, new_samples, new_matches
class RCNNTargetGenerator(gluon.HybridBlock):
"""RCNN target encoder to generate matching target and regression target values.
Parameters
----------
num_class : int
Total number of positive classes.
max_pos : int, default is 128
Upper bound on the number of positive samples.
per_device_batch_size : int, default is 1
Per device batch size
means : iterable of float, default is (0., 0., 0., 0.)
Mean values to be subtracted from regression targets.
stds : iterable of float, default is (.1, .1, .2, .2)
Standard deviations to be divided from regression targets.
"""
def __init__(self, num_class, max_pos=128, per_device_batch_size=1, means=(0., 0., 0., 0.),
stds=(.1, .1, .2, .2)):
super(RCNNTargetGenerator, self).__init__()
self._cls_encoder = MultiClassEncoder()
self._box_encoder = NormalizedPerClassBoxCenterEncoder(
num_class=num_class, max_pos=max_pos, per_device_batch_size=per_device_batch_size,
means=means, stds=stds)
# pylint: disable=arguments-differ, unused-argument
def hybrid_forward(self, F, roi, samples, matches, gt_label, gt_box):
"""Components can handle batch images
Parameters
----------
roi: (B, N, 4), input proposals
samples: (B, N), value +1: positive / -1: negative.
matches: (B, N), value [0, M), index to gt_label and gt_box.
gt_label: (B, M), value [0, num_class), excluding background class.
gt_box: (B, M, 4), input ground truth box corner coordinates.
Returns
-------
cls_target: (B, N), value [0, num_class + 1), including background.
box_target: (B, N, C, 4), only foreground class has nonzero target.
box_weight: (B, N, C, 4), only foreground class has nonzero weight.
"""
with autograd.pause():
# cls_target (B, N)
cls_target = self._cls_encoder(samples, matches, gt_label)
# box_target, box_weight (C, B, N, 4)
box_target, box_mask, indices = self._box_encoder(samples, matches, roi, gt_label,
gt_box)
return cls_target, box_target, box_mask, indices
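# Illustration only (not part of gluon-cv): the positive/negative re-labelling
# that RCNNTargetSampler performs with its mask codes, rewritten with numpy for
# readability. Codes used above: 3 = positive, 2 = negative (4 after the
# negative-sampling pass), 0 = ignore; the sampler finally maps 3 -> +1,
# 2/4 -> -1 and keeps 0 as ignore.
import numpy as np

def _relabel_samples_sketch(mask_codes):
    samples = np.zeros_like(mask_codes)
    samples[mask_codes == 3] = 1    # positives
    samples[mask_codes == 2] = -1   # negatives
    samples[mask_codes == 4] = -1   # negatives marked during the neg-sampling pass
    return samples

# e.g. _relabel_samples_sketch(np.array([3, 2, 0, 4])) -> array([ 1, -1,  0, -1])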
|
1fca19d37aa2c2126777a652976039bf1e4433c2
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/science/py-mdp/files/patch-mdp__configuration.py
|
3a15968aa3b3293ded3fd32a8d904c1ad01da1db
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
patch-mdp__configuration.py
|
--- mdp/configuration.py.orig 2016-03-04 08:39:38 UTC
+++ mdp/configuration.py
@@ -388,7 +388,7 @@ def set_configuration():
try:
import svm as libsvm
libsvm.libsvm
- except ImportError as exc:
+ except (ImportError, OSError) as exc:
config.ExternalDepFailed('libsvm', exc)
except AttributeError as exc:
config.ExternalDepFailed('libsvm', 'libsvm version >= 2.91 required')
|
7e54b8e13a5d442abd44f56d6e8094927d1532b8
|
0cfd410037a841dc618d74e6f51b937256afbea0
|
/drop_queue.py
|
a1e0df7fb36f0fe43fbf222d1c6b7e7cb4397392
|
[] |
no_license
|
MyrikLD/LinusTrinus
|
19e997e2c98325e294878a77ee4d24f4d664e4ba
|
bc34a38e2b0d534b9a5396af1509d63877b8f75e
|
refs/heads/master
| 2023-04-06T13:55:38.413242
| 2023-04-05T15:07:50
| 2023-04-05T15:07:50
| 117,450,600
| 130
| 22
| null | 2023-04-05T15:07:52
| 2018-01-14T17:19:48
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
py
|
drop_queue.py
|
import queue
class DropQueue(queue.Queue):
def put(self, *args, **kwargs):
if self.full():
try:
self.get_nowait()
except queue.Empty:
pass
queue.Queue.put(self, *args, **kwargs)
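# Usage sketch (not part of the original module): when the queue is full the
# oldest item is silently discarded, so put() never blocks on a full queue.
if __name__ == "__main__":
    q = DropQueue(maxsize=2)
    q.put(1)
    q.put(2)
    q.put(3)          # queue is full: 1 is dropped, 3 is stored
    assert q.get() == 2
    assert q.get() == 3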
|
b89df0c5f5fc86948efb7f871d2acf06ed347795
|
ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f
|
/harness/determined/tensorboard/fetchers/azure.py
|
87f90857c28aedae652787ddbfc388bb6a1f4ee9
|
[
"Apache-2.0"
] |
permissive
|
determined-ai/determined
|
9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e
|
8239b1993f4f44390f4e88901ffaf3b12429b83c
|
refs/heads/main
| 2023-08-21T12:13:36.651298
| 2023-08-21T08:34:16
| 2023-08-21T08:34:16
| 253,846,879
| 2,531
| 330
|
Apache-2.0
| 2023-09-14T21:54:17
| 2020-04-07T16:12:29
|
Go
|
UTF-8
|
Python
| false
| false
| 2,534
|
py
|
azure.py
|
import datetime
import logging
import os
import urllib
from typing import Any, Callable, Dict, Generator, List
from .base import Fetcher
logger = logging.getLogger(__name__)
class AzureFetcher(Fetcher):
def __init__(self, storage_config: Dict[str, Any], storage_paths: List[str], local_dir: str):
from azure.storage import blob
connection_string = storage_config.get("connection_string")
container = storage_config.get("container")
account_url = storage_config.get("account_url")
credential = storage_config.get("credential")
if storage_config.get("connection_string"):
self.client = blob.BlobServiceClient.from_connection_string(connection_string)
elif account_url:
self.client = blob.BlobServiceClient(account_url, credential)
else:
raise ValueError("Either 'connection_string' or 'account_url' must be specified.")
if container is None:
raise ValueError("'container' must be specified.")
self.container_name = container if not container.endswith("/") else container[:-1]
self.local_dir = local_dir
self.storage_paths = storage_paths
self._file_records = {} # type: Dict[str, datetime.datetime]
def _list(self, storage_path: str) -> Generator[str, None, None]:
logger.debug(
f"Listing keys in container: '{self.container_name}'"
f" with storage_path: '{storage_path}'"
)
container = self.client.get_container_client(self.container_name)
prefix = urllib.parse.urlparse(storage_path).path.lstrip("/")
blobs = container.list_blobs(name_starts_with=prefix)
for blob in blobs:
filepath, mtime = blob["name"], blob["last_modified"]
prev_mtime = self._file_records.get(filepath)
if prev_mtime is not None and prev_mtime >= mtime:
continue
self._file_records[filepath] = mtime
yield filepath
def _fetch(self, filepath: str, new_file_callback: Callable) -> None:
local_path = os.path.join(self.local_dir, self.container_name, filepath)
dir_path = os.path.dirname(local_path)
os.makedirs(dir_path, exist_ok=True)
with open(local_path, "wb") as local_file:
stream = self.client.get_blob_client(self.container_name, filepath).download_blob()
stream.readinto(local_file)
logger.debug(f"Downloaded file to local: {local_path}")
new_file_callback()
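# Standalone illustration (an assumption, not part of the determined API): the
# "skip blobs that have not changed since the last listing" pattern that
# _list() implements with its _file_records mtime cache, reduced to plain
# Python so it can be read and run in isolation.
def _incremental_listing_sketch(previous_mtimes, current_blobs):
    """previous_mtimes: dict name -> mtime; current_blobs: iterable of (name, mtime)."""
    for name, mtime in current_blobs:
        last_seen = previous_mtimes.get(name)
        if last_seen is not None and last_seen >= mtime:
            continue  # unchanged since the last fetch, skip it
        previous_mtimes[name] = mtime
        yield name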
|
6ea1bf8a4b0e0a38f9f3bb16ef81b2bf0f2150af
|
c1da9aff530a66aebe2fb3eb28d2b806dd22cb85
|
/scripts/build_demo_dataset.py
|
25fcd2a18aad9e2a24bee04a943f35c6678e0db7
|
[] |
no_license
|
XgDuan/WSDEC
|
6a59e71a715aefe5701203f2741a34021a6b80bd
|
64abf5ee041ac6ad138de31880dd6cad73c70b00
|
refs/heads/master
| 2020-04-02T14:00:32.408337
| 2020-03-21T03:53:27
| 2020-03-21T03:53:27
| 154,506,382
| 104
| 25
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
build_demo_dataset.py
|
import json
def build_demo_dataset(source_file, target_file, item_num):
captioning = json.load(open(source_file, 'r'))
target_keys = list(captioning.keys())[:item_num]  # dict views are not sliceable in Python 3
target = {key:captioning[key] for key in target_keys}
json.dump(target, open(target_file, 'w'))
if __name__ == '__main__':
build_demo_dataset('./data/densecap/train.json', './data/demo_densecap/train.json', 120)
build_demo_dataset('./data/densecap/val_1.json', './data/demo_densecap/val_1.json', 60)
build_demo_dataset('./data/densecap/val_2.json', './data/demo_densecap/val_2.json', 60)
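# Expected input shape (an assumption inferred from the code, not documented
# upstream): each source file is a JSON object keyed by video id, e.g.
# {"v_ab12": {"timestamps": [...], "sentences": [...]}, ...}; the script keeps
# only the first `item_num` entries to build a small demo split.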
|
eb984d9354bbf6dd2d273538573432b508f95a42
|
9508168e33819894da0adba6f5738035823ce580
|
/djmoney/__init__.py
|
f3ca3321b7901e89756f33167d59b3a5170cf28b
|
[
"BSD-3-Clause"
] |
permissive
|
django-money/django-money
|
a05210f57848530603debaf770041fd77a891468
|
aceac3ecc1f0202e0bc59cb712c8b11e00b422f7
|
refs/heads/main
| 2023-08-18T02:11:13.824022
| 2023-07-26T13:24:06
| 2023-07-26T13:24:06
| 1,750,874
| 1,428
| 288
|
NOASSERTION
| 2023-09-10T19:45:19
| 2011-05-15T12:13:19
|
Python
|
UTF-8
|
Python
| false
| false
| 247
|
py
|
__init__.py
|
__version__ = "3.2.0"
try:
import django
if django.VERSION < (3, 2):
default_app_config = "djmoney.apps.MoneyConfig"
except ModuleNotFoundError:
# this part is useful to allow setup.py to be used for version checks
pass
|
2a463896d9a8f90806543a00189fd85c6755e704
|
6d652aa802d90571a640ac0b538ff3055d0e34c5
|
/i3pystatus/mail/thunderbird.py
|
61c7710cd8e0088cb4d5f8a4ea696db3a4a5164b
|
[
"MIT"
] |
permissive
|
enkore/i3pystatus
|
38eaea8203ed309ff90e1717bd3a9075f12590b0
|
0820dd4e3d479dddec7797b2ea9a83da0f62b7cf
|
refs/heads/current
| 2023-08-18T11:36:18.296269
| 2023-04-25T20:56:08
| 2023-04-25T20:56:08
| 8,130,605
| 438
| 244
|
MIT
| 2023-08-13T12:13:33
| 2013-02-11T01:01:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
thunderbird.py
|
# This plugin listens for dbus signals emitted by the
# thunderbird-dbus-sender extension for TB:
# https://github.com/janoliver/thunderbird-dbus-sender
# The plugin must be active and thunderbird running for the module to work
# properly.
from functools import partial
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
from i3pystatus.mail import Backend
class Thunderbird(Backend):
"""
This class listens for dbus signals emitted by
the dbus-sender extension for thunderbird.
Requires python-dbus
"""
_unread = set()
def init(self):
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
bus.add_signal_receiver(self.new_msg,
dbus_interface="org.mozilla.thunderbird.DBus",
signal_name="NewMessageSignal")
bus.add_signal_receiver(self.changed_msg,
dbus_interface="org.mozilla.thunderbird.DBus",
signal_name="ChangedMessageSignal")
loop = GObject.MainLoop()
dbus.mainloop.glib.threads_init()
self.context = loop.get_context()
self.run = partial(self.context.iteration, False)
def new_msg(self, id, author, subject):
if id not in self._unread:
self._unread.add(id)
def changed_msg(self, id, event):
if event == "read" and id in self._unread:
self._unread.remove(id)
@property
def unread(self):
self.run()
return len(self._unread)
Backend = Thunderbird
|
91ae0507aef2928b9f11f15b8fbe776954778650
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/__init__.py
|
0858ec706f38091c3545c18d4146d02947db2333
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
__init__.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._accounts_operations import AccountsOperations
from ._consumer_invitations_operations import ConsumerInvitationsOperations
from ._data_sets_operations import DataSetsOperations
from ._data_set_mappings_operations import DataSetMappingsOperations
from ._email_registrations_operations import EmailRegistrationsOperations
from ._invitations_operations import InvitationsOperations
from ._operations import Operations
from ._shares_operations import SharesOperations
from ._provider_share_subscriptions_operations import ProviderShareSubscriptionsOperations
from ._share_subscriptions_operations import ShareSubscriptionsOperations
from ._consumer_source_data_sets_operations import ConsumerSourceDataSetsOperations
from ._synchronization_settings_operations import SynchronizationSettingsOperations
from ._triggers_operations import TriggersOperations
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AccountsOperations",
"ConsumerInvitationsOperations",
"DataSetsOperations",
"DataSetMappingsOperations",
"EmailRegistrationsOperations",
"InvitationsOperations",
"Operations",
"SharesOperations",
"ProviderShareSubscriptionsOperations",
"ShareSubscriptionsOperations",
"ConsumerSourceDataSetsOperations",
"SynchronizationSettingsOperations",
"TriggersOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
e89e2885c0f06351ea42d16a336d5ae5905a49e2
|
0577a46d8d28e1fd8636893bbdd2b18270bb8eb8
|
/chromium/weblayer/browser/url_bar/DEPS
|
34bb2688e08925d4878ecb765a78d79fc8ff3725
|
[
"BSD-3-Clause"
] |
permissive
|
ric2b/Vivaldi-browser
|
388a328b4cb838a4c3822357a5529642f86316a5
|
87244f4ee50062e59667bf8b9ca4d5291b6818d7
|
refs/heads/master
| 2022-12-21T04:44:13.804535
| 2022-12-17T16:30:35
| 2022-12-17T16:30:35
| 86,637,416
| 166
| 41
|
BSD-3-Clause
| 2021-03-31T18:49:30
| 2017-03-29T23:09:05
| null |
UTF-8
|
Python
| false
| false
| 86
|
DEPS
|
include_rules = [
"+components/browsing_data/content",
"+components/page_info",
]
|
|
c13bd5d0ace7937e09f0b1b8f485b9bd22b89433
|
64d923ab490341af97c4e7f6d91bf0e6ccefdf4b
|
/tensorforce/core/models/tensorforce.py
|
3be50029ef85ea8eaa3611528d5b572e4d0b797f
|
[
"Apache-2.0"
] |
permissive
|
tensorforce/tensorforce
|
38d458fedeeaa481adf083397829cea434d020cd
|
1bf4c3abb471062fb66f9fe52852437756fd527b
|
refs/heads/master
| 2023-08-17T17:35:34.578444
| 2023-08-14T20:14:08
| 2023-08-14T20:14:08
| 85,491,050
| 1,312
| 246
|
Apache-2.0
| 2023-08-14T20:14:10
| 2017-03-19T16:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 148,569
|
py
|
tensorforce.py
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import tensorflow as tf
from tensorforce import TensorforceError
from tensorforce.core import ModuleDict, memory_modules, optimizer_modules, parameter_modules, \
SignatureDict, TensorDict, TensorSpec, TensorsSpec, tf_function, tf_util, VariableDict
from tensorforce.core.models import Model
from tensorforce.core.networks import Preprocessor
from tensorforce.core.objectives import objective_modules
from tensorforce.core.policies import policy_modules, StochasticPolicy
class TensorforceModel(Model):
def __init__(
self, *,
states, actions, max_episode_timesteps,
policy, memory, update, optimizer, objective, reward_estimation,
baseline, baseline_optimizer, baseline_objective,
l2_regularization, entropy_regularization,
state_preprocessing,
exploration, variable_noise,
parallel_interactions,
config, saver, summarizer, tracking
):
super().__init__(
states=states, actions=actions, l2_regularization=l2_regularization,
parallel_interactions=parallel_interactions, config=config, saver=saver,
summarizer=summarizer, tracking=tracking
)
if max_episode_timesteps is None:
self.max_episode_timesteps = None
else:
self.max_episode_timesteps = int(max_episode_timesteps)
# State preprocessing
self.processed_states_spec = TensorsSpec()
self.state_preprocessing = ModuleDict()
if state_preprocessing == 'linear_normalization':
# Default handling, otherwise layer will be applied to all input types
state_preprocessing = {
name: ['linear_normalization'] for name, spec in self.states_spec.items()
if spec.type == 'float' and spec.min_value is not None and
spec.max_value is not None
}
if not isinstance(state_preprocessing, dict) or \
any(name not in self.states_spec for name in state_preprocessing):
state_preprocessing = {name: state_preprocessing for name in self.states_spec}
for name, spec in self.states_spec.items():
if name in state_preprocessing:
layers = state_preprocessing[name]
elif spec.type in state_preprocessing:
layers = state_preprocessing[spec.type]
else:
layers = None
if layers is None:
self.processed_states_spec[name] = self.states_spec[name]
else:
if name is None:
module_name = 'state_preprocessing'
else:
module_name = name + '_preprocessing'
self.state_preprocessing[name] = self.submodule(
name=module_name, module=Preprocessor, is_trainable=False, input_spec=spec,
layers=layers
)
spec = self.state_preprocessing[name].output_spec()
self.processed_states_spec[name] = spec
if spec.type == 'float' and spec.min_value is not None and \
spec.max_value is not None:
if isinstance(spec.min_value, float):
if not (-10.0 <= spec.min_value < 0.0) or not (0.0 < spec.max_value <= 10.0):
logging.warning("{}tate{} does not seem to be normalized, consider "
"adding linear_normalization preprocessing.".format(
'S' if layers is None else 'Preprocessed s',
'' if name is None else ' ' + name
))
else:
# TODO: missing +/-10.0 check, but cases of values +/-inf are already covered by
# previous no-bound warning
if (spec.min_value >= 0.0).any() or (spec.max_value <= 0.0).any():
logging.warning("{}tate{} does not seem to be normalized, consider "
"adding linear_normalization preprocessing.".format(
'S' if layers is None else 'Preprocessed s',
'' if name is None else ' ' + name
))
# Action exploration
if exploration is None:
exploration = 0.0
if isinstance(exploration, dict) and all(name in self.actions_spec for name in exploration):
# Different exploration per action
self.exploration = ModuleDict()
for name, spec in self.actions_spec.items():
if name in exploration:
module = exploration[name]
elif spec.type in exploration:
module = exploration[spec.type]
else:
module = None
if module is None:
pass
elif spec.type in ('bool', 'int'):
self.exploration[name] = self.submodule(
name=(name + '_exploration'), module=module, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0, max_value=1.0
)
else:
self.exploration[name] = self.submodule(
name=(name + '_exploration'), module=module, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
else:
# Same exploration for all actions
self.exploration = self.submodule(
name='exploration', module=exploration, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
# Variable noise
if variable_noise is None:
variable_noise = 0.0
self.variable_noise = self.submodule(
name='variable_noise', module=variable_noise, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
# Reward estimation argument check
if not all(key in (
'advantage_processing', 'discount', 'estimate_advantage', 'gae_decay', 'horizon',
'predict_action_values', 'predict_horizon_values', 'predict_terminal_values',
'return_processing', 'reward_processing', 'trace_decay'
) for key in reward_estimation):
raise TensorforceError.value(
name='agent', argument='reward_estimation', value=reward_estimation,
hint='not from {advantage_processing,discount,estimate_advantage,gae_decay,'
'horizon,predict_action_values,predict_horizon_values,predict_terminal_values,'
'return_processing,reward_processing,trace_decay}'
)
# Reward estimation
self.estimate_advantage = reward_estimation.get('estimate_advantage', False)
self.predict_horizon_values = reward_estimation.get('predict_horizon_values')
self.predict_action_values = reward_estimation.get('predict_action_values', False)
self.predict_terminal_values = reward_estimation.get('predict_terminal_values', False)
# Return horizon
if reward_estimation['horizon'] == 'episode':
self.reward_horizon = 'episode'
if self.predict_horizon_values is None:
self.predict_horizon_values = 'early'
elif self.predict_horizon_values == 'late':
raise TensorforceError.value(
name='agent', argument='reward_estimation[predict_horizon_values]',
value=self.predict_horizon_values,
condition='reward_estimation[reward_horizon] is "episode"'
)
else:
self.reward_horizon = self.submodule(
name='reward_horizon', module=reward_estimation['horizon'],
modules=parameter_modules, dtype='int', min_value=1,
max_value=self.max_episode_timesteps
)
if self.predict_horizon_values is None:
self.predict_horizon_values = 'late'
# Reward discount
reward_discount = reward_estimation.get('discount')
if reward_discount is None:
reward_discount = 1.0
self.reward_discount = self.submodule(
name='reward_discount', module=reward_discount, modules=parameter_modules,
dtype='float', min_value=0.0, max_value=1.0
)
# Entropy regularization
if entropy_regularization is None:
entropy_regularization = 0.0
self.entropy_regularization = self.submodule(
name='entropy_regularization', module=entropy_regularization,
modules=parameter_modules, is_trainable=False, dtype='float', min_value=0.0
)
# Update mode
if not all(key in ('batch_size', 'frequency', 'start', 'unit') for key in update):
raise TensorforceError.value(
name='agent', argument='update', value=list(update),
hint='not from {batch_size,frequency,start,unit}'
)
# update: unit
elif 'unit' not in update:
raise TensorforceError.required(name='agent', argument='update[unit]')
elif update['unit'] not in ('timesteps', 'episodes'):
raise TensorforceError.value(
name='agent', argument='update[unit]', value=update['unit'],
hint='not in {timesteps,episodes}'
)
# update: batch_size
elif 'batch_size' not in update:
raise TensorforceError.required(name='agent', argument='update[batch_size]')
self.update_unit = update['unit']
self.update_batch_size = self.submodule(
name='update_batch_size', module=update['batch_size'], modules=parameter_modules,
is_trainable=False, dtype='int', min_value=1
)
if 'frequency' in update and update['frequency'] == 'never':
self.update_frequency = None
else:
frequency = update.get('frequency')
if frequency is None:
frequency = update['batch_size']
elif isinstance(frequency, float):
if frequency <= 0.0 or frequency > 1.0:
raise TensorforceError.value(
name='agent', argument='update[frequency]', value=update['frequency'],
hint='not in (0.0, 1.0]'
)
else:
frequency = max(1, int(frequency * update['batch_size']))
self.update_frequency = self.submodule(
name='update_frequency', module=frequency, modules=parameter_modules,
is_trainable=False, dtype='int', min_value=1,
max_value=max(2, self.update_batch_size.max_value())
)
start = update.get('start')
if start is None:
start = 0
self.update_start = self.submodule(
name='update_start', module=start, modules=parameter_modules, is_trainable=False,
dtype='int', min_value=0
)
# Baseline optimization overview:
# Policy Objective Optimizer Config
# n n n default predict_horizon_values=False
# n n f default predict_horizon=False
# n n y default predict_horizon=False
# n y n main policy, shared loss/kldiv, weighted 1.0
# n y f main policy, shared loss/kldiv, weighted
# n y y main policy, separate
# y n n estimate_advantage=True,advantage_in_loss=True
# y n f shared objective/loss/kldiv, weighted
# y n y shared objective
# y y n shared loss/kldiv, weighted 1.0, equal horizon
# y y f shared loss/kldiv, weighted, equal horizon
# y y y separate
self.separate_baseline = (baseline is not None)
if baseline is None and baseline_objective is None and \
'predict_horizon_values' not in reward_estimation:
self.predict_horizon_values = False
if baseline is not None and baseline_objective is None and \
baseline_optimizer is None:
if 'estimate_advantage' not in reward_estimation:
self.estimate_advantage = 'late'
self.advantage_in_loss = True
else:
self.advantage_in_loss = False
if baseline_optimizer is None and baseline_objective is not None:
baseline_optimizer = 1.0
if baseline_optimizer is None or isinstance(baseline_optimizer, float):
baseline_is_trainable = True
else:
baseline_is_trainable = False
# Reward processing
reward_processing = reward_estimation.get('reward_processing')
if reward_processing is None:
self.reward_processing = None
else:
self.reward_processing = self.submodule(
name='reward_processing', module=Preprocessor, is_trainable=False,
input_spec=self.reward_spec, layers=reward_processing
)
if self.reward_processing.output_spec() != self.reward_spec:
raise TensorforceError.mismatch(
name='reward_estimation[reward_processing]', argument='output spec',
value1=self.reward_processing.output_spec(), value2=self.reward_spec
)
# Return processing
return_processing = reward_estimation.get('return_processing')
if return_processing is None:
self.return_processing = None
else:
self.return_processing = self.submodule(
name='return_processing', module=Preprocessor, is_trainable=False,
input_spec=self.reward_spec, layers=return_processing,
is_preprocessing_layer_valid=False
)
if self.return_processing.output_spec() != self.reward_spec:
raise TensorforceError.mismatch(
name='reward_estimation[return_processing]', argument='output spec',
value1=self.return_processing.output_spec(), value2=self.reward_spec
)
# Advantage processing
advantage_processing = reward_estimation.get('advantage_processing')
if advantage_processing is None:
self.advantage_processing = None
else:
if self.estimate_advantage is False:
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[advantage_processing]',
condition='reward_estimation[estimate_advantage] is false'
)
self.advantage_processing = self.submodule(
name='advantage_processing', module=Preprocessor, is_trainable=False,
input_spec=self.reward_spec, layers=advantage_processing,
is_preprocessing_layer_valid=False
)
if self.advantage_processing.output_spec() != self.reward_spec:
raise TensorforceError.mismatch(
name='reward_estimation[advantage_processing]', argument='output spec',
value1=self.advantage_processing.output_spec(), value2=self.reward_spec
)
# Objectives
self.objective = self.submodule(
name='policy_objective', module=objective, modules=objective_modules,
states_spec=self.processed_states_spec, auxiliaries_spec=self.auxiliaries_spec,
actions_spec=self.actions_spec, reward_spec=self.reward_spec
)
if baseline_objective is None:
self.baseline_objective = None
else:
self.baseline_objective = self.submodule(
name='baseline_objective', module=baseline_objective, modules=objective_modules,
is_trainable=baseline_is_trainable, states_spec=self.processed_states_spec,
auxiliaries_spec=self.auxiliaries_spec, actions_spec=self.actions_spec,
reward_spec=self.reward_spec
)
assert len(self.baseline_objective.required_baseline_fns()) == 0
# Policy
required_fns = {'policy'}
required_fns.update(self.objective.required_policy_fns())
if not self.separate_baseline:
if self.predict_horizon_values is not False or self.estimate_advantage is not False:
if self.predict_action_values:
required_fns.add('action_value')
else:
required_fns.add('state_value')
required_fns.update(self.objective.required_baseline_fns())
if self.baseline_objective is not None:
required_fns.update(self.baseline_objective.required_policy_fns())
if required_fns <= {'state_value'}:
default_module = 'parametrized_state_value'
elif required_fns <= {'action_value'} and \
all(spec.type == 'float' for spec in self.actions_spec.values()):
default_module = 'parametrized_action_value'
elif required_fns <= {'policy', 'action_value', 'state_value'} and \
all(spec.type in ('bool', 'int') for spec in self.actions_spec.values()):
default_module = 'parametrized_value_policy'
elif required_fns <= {'policy', 'stochastic'}:
default_module = 'parametrized_distributions'
else:
logging.warning(
"Policy type should be explicitly specified for non-standard agent configuration."
)
default_module = 'parametrized_distributions'
self.policy = self.submodule(
name='policy', module=policy, modules=policy_modules, default_module=default_module,
states_spec=self.processed_states_spec, auxiliaries_spec=self.auxiliaries_spec,
actions_spec=self.actions_spec
)
self.internals_spec['policy'] = self.policy.internals_spec
self.initial_internals['policy'] = self.policy.internals_init()
self.objective.internals_spec = self.policy.internals_spec
if not self.entropy_regularization.is_constant(value=0.0) and \
not isinstance(self.policy, StochasticPolicy):
raise TensorforceError.invalid(
name='agent', argument='entropy_regularization',
condition='policy is not stochastic'
)
# Baseline
if self.separate_baseline:
if self.predict_horizon_values is not False or self.estimate_advantage is not False:
if self.predict_action_values:
required_fns = {'action_value'}
else:
required_fns = {'state_value'}
required_fns.update(self.objective.required_baseline_fns())
if self.baseline_objective is not None:
required_fns.update(self.baseline_objective.required_policy_fns())
if required_fns <= {'state_value'}:
default_module = 'parametrized_state_value'
elif required_fns <= {'action_value'} and \
all(spec.type == 'float' for spec in self.actions_spec.values()):
default_module = 'parametrized_action_value'
elif required_fns <= {'policy', 'action_value', 'state_value'} and \
all(spec.type in ('bool', 'int') for spec in self.actions_spec.values()):
default_module = 'parametrized_value_policy'
elif required_fns <= {'policy', 'stochastic'}:
default_module = 'parametrized_distributions'
else:
logging.warning("Policy type should be explicitly specified for non-standard agent "
"configuration.")
default_module = 'parametrized_distributions'
self.baseline = self.submodule(
name='baseline', module=baseline, modules=policy_modules,
default_module=default_module, is_trainable=baseline_is_trainable,
states_spec=self.processed_states_spec, auxiliaries_spec=self.auxiliaries_spec,
actions_spec=self.actions_spec
)
self.internals_spec['baseline'] = self.baseline.internals_spec
self.initial_internals['baseline'] = self.baseline.internals_init()
else:
self.baseline = self.policy
if self.baseline_objective is not None:
self.baseline_objective.internals_spec = self.baseline.internals_spec
# Check for name collisions
for name in self.internals_spec:
if name in self.value_names:
raise TensorforceError.exists(name='value name', value=name)
self.value_names.add(name)
# Optimizers
if baseline_optimizer is None:
self.baseline_loss_weight = None
internals_spec = self.internals_spec
self.baseline_optimizer = None
elif isinstance(baseline_optimizer, float):
self.baseline_loss_weight = self.submodule(
name='baseline_loss_weight', module=baseline_optimizer, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
internals_spec = self.internals_spec
self.baseline_optimizer = None
else:
self.baseline_loss_weight = None
internals_spec = self.internals_spec['policy']
if self.separate_baseline:
baseline_internals = self.internals_spec['baseline']
else:
baseline_internals = self.internals_spec['policy']
arguments_spec = TensorsSpec(
states=self.processed_states_spec, horizons=TensorSpec(type='int', shape=(2,)),
internals=baseline_internals, auxiliaries=self.auxiliaries_spec,
actions=self.actions_spec, reward=self.reward_spec
)
if self.baseline_objective is not None:
arguments_spec['reference'] = self.baseline_objective.reference_spec()
self.baseline_optimizer = self.submodule(
name='baseline_optimizer', module=baseline_optimizer, modules=optimizer_modules,
is_trainable=False, arguments_spec=arguments_spec
)
arguments_spec = TensorsSpec(
states=self.processed_states_spec, horizons=TensorSpec(type='int', shape=(2,)),
internals=internals_spec, auxiliaries=self.auxiliaries_spec, actions=self.actions_spec,
reward=self.reward_spec
)
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
arguments_spec['reference'] = TensorsSpec(
policy=self.objective.reference_spec(),
baseline=self.baseline_objective.reference_spec()
)
else:
arguments_spec['reference'] = self.objective.reference_spec()
self.optimizer = self.submodule(
name='policy_optimizer', module=optimizer, modules=optimizer_modules,
arguments_spec=arguments_spec
)
# Memory
values_spec = TensorsSpec(
states=self.processed_states_spec, internals=self.internals_spec,
auxiliaries=self.auxiliaries_spec, actions=self.actions_spec,
terminal=self.terminal_spec, reward=self.reward_spec
)
if self.update_unit == 'timesteps':
max_past_horizon = max(
self.policy.max_past_horizon(on_policy=False),
self.baseline.max_past_horizon(on_policy=False)
)
min_capacity = self.update_batch_size.max_value() + 1 + max_past_horizon
if self.reward_horizon == 'episode':
min_capacity += self.max_episode_timesteps
else:
min_capacity += self.reward_horizon.max_value()
if self.max_episode_timesteps is not None:
min_capacity = max(min_capacity, self.max_episode_timesteps)
elif self.update_unit == 'episodes':
if self.max_episode_timesteps is None:
min_capacity = None
else:
min_capacity = (self.update_batch_size.max_value() + 1) * self.max_episode_timesteps
else:
assert False
if self.config.buffer_observe == 'episode':
if self.max_episode_timesteps is not None:
min_capacity = max(min_capacity, 2 * self.max_episode_timesteps)
elif isinstance(self.config.buffer_observe, int):
if min_capacity is None:
min_capacity = 2 * self.config.buffer_observe
else:
min_capacity = max(min_capacity, 2 * self.config.buffer_observe)
self.memory = self.submodule(
name='memory', module=memory, modules=memory_modules, is_trainable=False,
values_spec=values_spec, min_capacity=min_capacity
)
# Trace decay
trace_decay = reward_estimation.get('trace_decay', 1.0)
if trace_decay != 1.0 and self.predict_horizon_values != 'early':
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[trace_decay]',
condition='reward_estimation[predict_horizon_values] != "early"'
)
self.trace_decay = self.submodule(
name='trace_decay', module=trace_decay, modules=parameter_modules, dtype='float',
min_value=0.0, max_value=1.0
)
# GAE decay
gae_decay = reward_estimation.get('gae_decay', 0.0)
if gae_decay != 0.0:
from tensorforce.core.memories import Recent
if not isinstance(self.memory, Recent):
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[gae_decay]',
condition='memory type is not Recent'
)
elif self.estimate_advantage is False:
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[gae_decay]',
condition='reward_estimation[estimate_advantage] is false'
)
elif self.advantage_in_loss:
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[gae_decay]',
condition='advantage-in-loss mode'
)
self.gae_decay = self.submodule(
name='gae_decay', module=gae_decay, modules=parameter_modules, dtype='float',
min_value=0.0, max_value=1.0
)
def get_architecture(self):
if self.state_preprocessing.is_singleton():
architecture = 'State-preprocessing: {}\n'.format(
self.state_preprocessing.singleton().get_architecture().replace('\n', '\n ')
)
else:
architecture = ''
for name, preprocessor in self.state_preprocessing.items():
architecture += ' {}: {}\n'.format(
name, preprocessor.get_architecture().replace('\n', '\n ')
)
if len(architecture) > 0:
architecture = 'State-preprocessing:\n' + architecture
architecture = 'Policy:\n {}'.format(
self.policy.get_architecture().replace('\n', '\n ')
)
if self.separate_baseline:
architecture += '\nBaseline:\n {}'.format(
self.baseline.get_architecture().replace('\n', '\n ')
)
elif self.predict_horizon_values or self.baseline_objective is not None:
architecture += '\nBaseline: policy used as baseline'
return architecture
def initialize(self):
super().initialize()
# Initial variables summaries
if self.summaries == 'all' or 'variables' in self.summaries:
with self.summarizer.as_default():
for variable in self.trainable_variables:
name = variable.name
assert name[-2] == ':'
if name.startswith(self.name + '/'):
# Add prefix self.name since otherwise different scope from later summaries
name = self.name + '/variables/' + name[len(self.name) + 1: -2]
else:
name = name[:-2]
x = tf.math.reduce_mean(input_tensor=variable)
tf.summary.scalar(name=name, data=x, step=self.updates)
def core_initialize(self):
super().core_initialize()
# Preprocessed episode reward
if self.reward_processing is not None:
self.preprocessed_episode_return = self.variable(
name='preprocessed-episode-return',
spec=TensorSpec(type=self.reward_spec.type, shape=(self.parallel_interactions,)),
initializer='zeros', is_trainable=False, is_saved=False
)
# Buffer index
self.buffer_index = self.variable(
name='buffer-index', spec=TensorSpec(type='int', shape=(self.parallel_interactions,)),
initializer='zeros', is_trainable=False, is_saved=False
)
if self.reward_horizon == 'episode' or self.parallel_interactions > 1 or \
self.config.buffer_observe == 'episode':
capacity = self.max_episode_timesteps
else:
capacity = self.config.buffer_observe + self.reward_horizon.max_value()
if self.max_episode_timesteps is not None:
capacity = min(capacity, self.max_episode_timesteps)
# States/internals/auxiliaries/actions buffers
def function(name, spec):
shape = (self.parallel_interactions, capacity) + spec.shape
return self.variable(
name=(name + '-buffer'), spec=TensorSpec(type=spec.type, shape=shape),
initializer='zeros', is_trainable=False, is_saved=False
)
self.states_buffer = self.processed_states_spec.fmap(
function=function, cls=VariableDict, with_names='states'
)
self.internals_buffer = self.internals_spec.fmap(
function=function, cls=VariableDict, with_names=True
)
self.auxiliaries_buffer = self.auxiliaries_spec.fmap(
function=function, cls=VariableDict, with_names='action'
)
self.actions_buffer = self.actions_spec.fmap(
function=function, cls=VariableDict, with_names='actions'
)
# Terminal/reward buffer
if self.config.buffer_observe != 'episode':
self.terminal_buffer = function('terminal', self.terminal_spec)
self.reward_buffer = function('reward', self.reward_spec)
# Buffer start
if self.reward_horizon != 'episode' and self.parallel_interactions == 1 and \
self.config.buffer_observe != 'episode':
self.circular_buffer = True
self.buffer_capacity = capacity
self.buffer_start = self.variable(
name='buffer-start',
spec=TensorSpec(type='int', shape=(self.parallel_interactions,)),
initializer='zeros', is_trainable=False, is_saved=False
)
else:
self.circular_buffer = False
# Last update
self.last_update = self.variable(
name='last-update', spec=TensorSpec(type='int'),
initializer=-self.update_frequency.max_value(), is_trainable=False, is_saved=True
)
# Optimizer initialize given variables
if self.advantage_in_loss:
self.optimizer.initialize_given_variables(variables=self.trainable_variables)
else:
self.optimizer.initialize_given_variables(variables=self.policy.trainable_variables)
if self.baseline_optimizer is not None:
self.baseline_optimizer.initialize_given_variables(
variables=self.baseline.trainable_variables
)
# Summaries and tracking
self.register_summary(label='loss', name='losses/policy-objective-loss')
self.register_tracking(
label='loss', name='policy-objective-loss', spec=TensorSpec(type='float')
)
self.register_summary(label='loss', name='losses/policy-regularization-loss')
self.register_tracking(
label='loss', name='policy-regularization-loss', spec=TensorSpec(type='float')
)
self.register_summary(label='loss', name='losses/policy-loss')
self.register_tracking(label='loss', name='policy-loss', spec=TensorSpec(type='float'))
if self.baseline_optimizer is not None or (
self.baseline_loss_weight is not None and
not self.baseline_loss_weight.is_constant(value=0.0)
):
self.register_summary(label='loss', name='losses/baseline-loss')
self.register_tracking(label='loss', name='baseline-loss', spec=TensorSpec(type='float'))
if self.separate_baseline:
self.register_summary(label='loss', name='losses/baseline-objective-loss')
self.register_tracking(
label='loss', name='baseline-objective-loss', spec=TensorSpec(type='float')
)
self.register_summary(label='loss', name='losses/baseline-regularization-loss')
self.register_tracking(
label='loss', name='baseline-regularization-loss',
spec=TensorSpec(type='float')
)
if self.reward_processing is not None:
self.register_tracking(
label='reward', name='preprocessed-reward', spec=TensorSpec(type='float')
)
self.register_tracking(
label='reward', name='preprocessed-episode-return', spec=TensorSpec(type='float')
)
self.register_tracking(label='reward', name='update-return', spec=TensorSpec(type='float'))
if self.return_processing is not None:
self.register_tracking(
label='reward', name='update-processed-return', spec=TensorSpec(type='float')
)
if self.estimate_advantage is not False:
self.register_tracking(
label='reward', name='update-advantage', spec=TensorSpec(type='float')
)
if self.advantage_processing is not None:
self.register_tracking(
label='reward', name='update-processed-advantage',
spec=TensorSpec(type='float')
)
if not self.gae_decay.is_constant(value=0.0):
self.register_tracking(
label='reward', name='update-gae', spec=TensorSpec(type='float')
)
self.register_tracking(label='entropy', name='entropy', spec=TensorSpec(type='float'))
self.register_tracking(
label='kl-divergence', name='kl-divergence', spec=TensorSpec(type='float')
)
if len(self.actions_spec) > 1:
for name in self.actions_spec:
self.register_tracking(
label='entropy', name=('entropies/' + name), spec=TensorSpec(type='float')
)
self.register_tracking(
label='kl-divergence', name=('kl-divergences/' + name),
spec=TensorSpec(type='float')
)
def initialize_api(self):
super().initialize_api()
if 'graph' in self.summaries:
tf.summary.trace_on(graph=True, profiler=False)
self.experience(
states=self.states_spec, internals=self.internals_spec,
auxiliaries=self.auxiliaries_spec, actions=self.actions_spec,
terminal=self.terminal_spec, reward=self.reward_spec, _initialize=True
)
if 'graph' in self.summaries:
tf.summary.trace_export(name='experience', step=self.timesteps, profiler_outdir=None)
tf.summary.trace_on(graph=True, profiler=False)
self.update(_initialize=True)
if 'graph' in self.summaries:
tf.summary.trace_export(name='update', step=self.timesteps, profiler_outdir=None)
def get_savedmodel_trackables(self):
trackables = super().get_savedmodel_trackables()
for name, trackable in self.policy.get_savedmodel_trackables().items():
assert name not in trackables
trackables[name] = trackable
if self.separate_baseline and len(self.internals_spec['baseline']) > 0:
for name, trackable in self.baseline.get_savedmodel_trackables().items():
assert name not in trackables
trackables[name] = trackable
return trackables
def input_signature(self, *, function):
if function == 'baseline_loss':
if self.separate_baseline:
internals_signature = self.internals_spec['baseline'].signature(batched=True)
else:
internals_signature = self.internals_spec['policy'].signature(batched=True)
if self.advantage_in_loss:
assert False
elif self.baseline_objective is None:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=internals_signature,
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.objective.reference_spec().signature(batched=True)
)
else:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=internals_signature,
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.baseline_objective.reference_spec().signature(batched=True)
)
elif function == 'core_experience':
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
terminal=self.terminal_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True)
)
elif function == 'core_update':
return SignatureDict()
elif function == 'experience':
return SignatureDict(
states=self.states_spec.signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
terminal=self.terminal_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True)
)
elif function == 'loss':
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=SignatureDict(
policy=self.objective.reference_spec().signature(batched=True),
baseline=self.baseline_objective.reference_spec().signature(batched=True)
)
)
elif self.baseline_optimizer is None:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.objective.reference_spec().signature(batched=True)
)
else:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec['policy'].signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.objective.reference_spec().signature(batched=True)
)
elif function == 'regularize':
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec['policy'].signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True)
)
elif function == 'update':
return SignatureDict()
else:
return super().input_signature(function=function)
def output_signature(self, *, function):
if function == 'baseline_loss':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=False)
)
elif function == 'core_experience':
return SignatureDict(
singleton=TensorSpec(type='bool', shape=()).signature(batched=False)
)
elif function == 'core_update':
return SignatureDict(
singleton=TensorSpec(type='bool', shape=()).signature(batched=False)
)
elif function == 'experience':
return SignatureDict(
timesteps=TensorSpec(type='int', shape=()).signature(batched=False),
episodes=TensorSpec(type='int', shape=()).signature(batched=False)
)
elif function == 'loss':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=False)
)
elif function == 'update':
return SignatureDict(
singleton=TensorSpec(type='int', shape=()).signature(batched=False)
)
else:
return super().output_signature(function=function)
@tf_function(num_args=0, api_function=True)
def reset(self):
operations = list()
zeros = tf_util.zeros(shape=(self.parallel_interactions,), dtype='int')
operations.append(self.buffer_index.assign(value=zeros, read_value=False))
if self.circular_buffer:
operations.append(self.buffer_start.assign(value=zeros, read_value=False))
operations.append(self.memory.reset())
# TODO: Synchronization optimizer initial sync?
with tf.control_dependencies(control_inputs=operations):
return super().reset()
@tf_function(num_args=6, api_function=True)
def experience(self, *, states, internals, auxiliaries, actions, terminal, reward):
true = tf_util.constant(value=True, dtype='bool')
one = tf_util.constant(value=1, dtype='int')
batch_size = tf_util.cast(x=tf.shape(input=terminal)[0], dtype='int')
# Input assertions
assertions = list()
if self.config.create_tf_assertions:
zero = tf_util.constant(value=0, dtype='int')
assertions.extend(self.states_spec.tf_assert(
x=states, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} state input.'
))
assertions.extend(self.internals_spec.tf_assert(
x=internals, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} internal input.'
))
assertions.extend(self.auxiliaries_spec.tf_assert(
x=auxiliaries, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} input.'
))
assertions.extend(self.actions_spec.tf_assert(
x=actions, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} action input.'
))
assertions.extend(self.terminal_spec.tf_assert(
x=terminal, batch_size=batch_size,
message='Agent.experience: invalid {issue} for terminal input.'
))
assertions.extend(self.reward_spec.tf_assert(
x=reward, batch_size=batch_size,
message='Agent.experience: invalid {issue} for reward input.'
))
# Mask assertions
if self.config.enable_int_action_masking:
for name, spec in self.actions_spec.items():
if spec.type == 'int' and spec.num_values is not None:
is_valid = tf.reduce_all(input_tensor=tf.gather(
params=auxiliaries[name]['mask'],
indices=tf.expand_dims(input=actions[name], axis=(spec.rank + 1)),
batch_dims=(spec.rank + 1)
))
assertions.append(tf.debugging.assert_equal(
x=is_valid, y=true, message="Agent.experience: invalid action / mask."
))
# Assertion: buffer indices is zero
assertions.append(tf.debugging.assert_equal(
x=tf.math.reduce_sum(input_tensor=self.buffer_index, axis=0), y=zero,
message="Agent.experience: cannot be called mid-episode."
))
# Assertion: one terminal
num_terms = tf.math.count_nonzero(input=terminal, dtype=tf_util.get_dtype(type='int'))
assertions.append(tf.debugging.assert_equal(
x=num_terms, y=one,
message="Agent.experience: input contains none or more than one terminal."
))
# Assertion: terminal is last timestep in batch
assertions.append(tf.debugging.assert_greater_equal(
x=terminal[-1], y=one,
message="Agent.experience: terminal is not the last input timestep."
))
with tf.control_dependencies(control_inputs=assertions):
# Preprocessing
for name in states:
if name in self.state_preprocessing:
states[name] = self.state_preprocessing[name].apply(
x=states[name], deterministic=true, independent=False
)
if self.reward_processing is not None:
reward = self.reward_processing.apply(
x=reward, deterministic=true, independent=False
)
# Core experience
experienced = self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
# Increment timestep and episode
with tf.control_dependencies(control_inputs=(experienced,)):
assignments = list()
assignments.append(self.timesteps.assign_add(delta=batch_size, read_value=False))
assignments.append(self.episodes.assign_add(delta=one, read_value=False))
with tf.control_dependencies(control_inputs=assignments):
timestep = tf_util.identity(input=self.timesteps)
episode = tf_util.identity(input=self.episodes)
return timestep, episode
@tf_function(num_args=0, api_function=True)
def update(self):
# Core update
updated = self.core_update()
with tf.control_dependencies(control_inputs=(updated,)):
return tf_util.identity(input=self.updates)
@tf_function(num_args=5)
def core_act(self, *, states, internals, auxiliaries, parallel, deterministic, independent):
zero_float = tf_util.constant(value=0.0, dtype='float')
# On-policy policy/baseline horizon (TODO: retrieve from buffer!)
assertions = list()
if self.config.create_tf_assertions:
zero = tf_util.constant(value=0, dtype='int')
past_horizon = tf.math.maximum(
x=self.policy.past_horizon(on_policy=True),
y=self.baseline.past_horizon(on_policy=True)
)
assertions.append(tf.debugging.assert_equal(
x=past_horizon, y=zero,
message="Policy/baseline on-policy horizon currently not supported."
))
if not independent:
false = tf_util.constant(value=False, dtype='bool')
assertions.append(tf.debugging.assert_equal(
x=deterministic, y=false,
message="Invalid combination deterministic and not independent."
))
# Variable noise
if len(self.policy.trainable_variables) > 0 and (
(not independent and not self.variable_noise.is_constant(value=0.0)) or
(independent and self.variable_noise.final_value() != 0.0)
):
if independent:
variable_noise = tf_util.constant(
value=self.variable_noise.final_value(), dtype=self.variable_noise.spec.type
)
else:
variable_noise = self.variable_noise.value()
def no_variable_noise():
return [tf.zeros_like(input=var) for var in self.policy.trainable_variables]
def apply_variable_noise():
variable_noise_tensors = list()
for variable in self.policy.trainable_variables:
noise = tf.random.normal(
shape=tf_util.shape(x=variable), mean=0.0, stddev=variable_noise,
dtype=self.variable_noise.spec.tf_type()
)
if variable.dtype != tf_util.get_dtype(type='float'):
noise = tf.cast(x=noise, dtype=variable.dtype)
assignment = variable.assign_add(delta=noise, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
variable_noise_tensors.append(tf_util.identity(input=noise))
return variable_noise_tensors
variable_noise_tensors = tf.cond(
pred=tf.math.logical_or(
x=deterministic, y=tf.math.equal(x=variable_noise, y=zero_float)
), true_fn=no_variable_noise, false_fn=apply_variable_noise
)
else:
variable_noise_tensors = list()
with tf.control_dependencies(control_inputs=(variable_noise_tensors + assertions)):
dependencies = list()
# State preprocessing (after variable noise)
for name in self.states_spec:
if name in self.state_preprocessing:
states[name] = self.state_preprocessing[name].apply(
x=states[name], deterministic=deterministic, independent=independent
)
# Policy act (after variable noise)
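# horizons is a (batch_size, 2) tensor of (start, length) pairs indexing into the
# gathered state sequence; for act, every timestep forms its own length-1 window.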
batch_size = tf_util.cast(x=tf.shape(input=states.value())[0], dtype='int')
starts = tf.range(batch_size, dtype=tf_util.get_dtype(type='int'))
lengths = tf_util.ones(shape=tf.expand_dims(input=batch_size, axis=0), dtype='int')
horizons = tf.stack(values=(starts, lengths), axis=1)
next_internals = TensorDict()
actions, next_internals['policy'] = self.policy.act(
states=states, horizons=horizons, internals=internals['policy'],
auxiliaries=auxiliaries, deterministic=deterministic, independent=independent
)
if isinstance(actions, tf.Tensor):
dependencies.append(actions)
else:
dependencies.extend(actions.flatten())
# Baseline internals (after variable noise)
# TODO: shouldn't be required for independent-act
if self.separate_baseline and len(self.internals_spec['baseline']) > 0:
next_internals['baseline'] = self.baseline.next_internals(
states=states, horizons=horizons, internals=internals['baseline'],
actions=actions, deterministic=deterministic, independent=independent
)
else:
next_internals['baseline'] = TensorDict()
dependencies.extend(next_internals.flatten())
# Reverse variable noise (after policy act)
if len(variable_noise_tensors) > 0:
with tf.control_dependencies(control_inputs=dependencies):
dependencies = list()
def apply_variable_noise():
assignments = list()
for var, noise in zip(self.policy.trainable_variables, variable_noise_tensors):
assignments.append(var.assign_sub(delta=noise, read_value=False))
return tf.group(*assignments)
dependencies.append(tf.cond(
pred=tf.math.equal(x=variable_noise, y=zero_float),
true_fn=tf.no_op, false_fn=apply_variable_noise
))
# Exploration
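# Action-space exploration, applied per action type below:
# - bool: with probability `exploration`, replace by a uniformly random True/False
# - int with num_values: with probability `exploration`, replace by a uniformly random
#   (optionally mask-respecting) valid value
# - int/float without num_values: add Gaussian noise scaled by `exploration`, then clip
#   to the action bounds if given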
if (not independent and (
isinstance(self.exploration, dict) or not self.exploration.is_constant(value=0.0)
)) or (independent and (
isinstance(self.exploration, dict) or self.exploration.final_value() != 0.0
)):
# Global exploration
if not isinstance(self.exploration, dict):
# exploration_fns = dict()
if not independent and not self.exploration.is_constant(value=0.0):
exploration = self.exploration.value()
elif independent and self.exploration.final_value() != 0.0:
exploration = tf_util.constant(
value=self.exploration.final_value(), dtype=self.exploration.spec.type
)
else:
assert False
float_dtype = tf_util.get_dtype(type='float')
for name, spec, action in self.actions_spec.zip_items(actions):
# Per-action exploration
if isinstance(self.exploration, dict):
if name not in self.exploration:
continue
elif not independent and not self.exploration[name].is_constant(value=0.0):
exploration = self.exploration[name].value()
elif independent and self.exploration[name].final_value() != 0.0:
exploration = tf_util.constant(
value=self.exploration[name].final_value(),
dtype=self.exploration[name].spec.type
)
else:
continue
# Apply exploration
if spec.type == 'bool':
# Bool action: if uniform[0, 1] < exploration, then uniform[True, False]
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
half = tf_util.constant(value=0.5, dtype='float')
random_action = tf.random.uniform(shape=shape, dtype=float_dtype) < half
is_random = tf.random.uniform(shape=shape, dtype=float_dtype) < exploration
return tf.where(condition=is_random, x=random_action, y=action)
elif spec.type == 'int' and spec.num_values is not None:
if self.config.enable_int_action_masking:
# Masked action: if uniform[0, 1] < exploration, then uniform[unmasked]
# (Similar code as for RandomModel.core_act)
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
mask = auxiliaries[name]['mask']
choices = tf_util.constant(
value=list(range(spec.num_values)), dtype=spec.type,
shape=(tuple(1 for _ in spec.shape) + (1, spec.num_values))
)
one = tf_util.constant(value=1, dtype='int', shape=(1,))
multiples = tf.concat(values=(shape, one), axis=0)
choices = tf.tile(input=choices, multiples=multiples)
choices = tf.boolean_mask(tensor=choices, mask=mask)
num_valid = tf.math.count_nonzero(input=mask, axis=(spec.rank + 1))
num_valid = tf.reshape(tensor=num_valid, shape=(-1,))
masked_offset = tf.math.cumsum(x=num_valid, axis=0, exclusive=True)
uniform = tf.random.uniform(shape=shape, dtype=float_dtype)
uniform = tf.reshape(tensor=uniform, shape=(-1,))
num_valid = tf_util.cast(x=num_valid, dtype='float')
random_offset = tf.dtypes.cast(
x=(uniform * num_valid), dtype=tf.dtypes.int64
)
random_action = tf.gather(
params=choices, indices=(masked_offset + random_offset)
)
random_action = tf.reshape(tensor=random_action, shape=shape)
is_random = tf.random.uniform(shape=shape, dtype=float_dtype)
is_random = is_random < exploration
return tf.where(condition=is_random, x=random_action, y=action)
else:
# Int action: if uniform[0, 1] < exploration, then uniform[num_values]
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
random_action = tf.random.uniform(
shape=shape, maxval=spec.num_values, dtype=spec.tf_type()
)
is_random = tf.random.uniform(shape=shape, dtype=float_dtype)
is_random = is_random < exploration
return tf.where(condition=is_random, x=random_action, y=action)
else:
# Int/float action: action + normal[0, exploration]
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
noise = tf.random.normal(shape=shape, dtype=spec.tf_type())
x = action + noise * exploration
# Clip action if left-/right-bounded
if spec.min_value is not None:
x = tf.math.maximum(x=x, y=spec.min_value)
if spec.max_value is not None:
x = tf.math.minimum(x=x, y=spec.max_value)
return x
# if isinstance(self.exploration, dict):
# Per-action exploration
actions[name] = tf.cond(
pred=tf.math.logical_or(
x=deterministic, y=tf.math.equal(x=exploration, y=zero_float)
), true_fn=(lambda: action), false_fn=apply_exploration
)
# else:
# exploration_fns[name] = apply_exploration
# if not isinstance(self.exploration, dict):
# # Global exploration
# def apply_exploration():
# for name in self.actions_spec:
# actions[name] = exploration_fns[name]()
# return actions
# actions = tf.cond(
# pred=tf.math.equal(x=exploration, y=zero_float),
# true_fn=(lambda: actions), false_fn=apply_exploration
# )
# Update states/internals/auxiliaries/actions buffers
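# Each parallel environment owns one row of the act-buffers; buffer_index[parallel]
# points at the next free slot (taken modulo buffer_capacity for circular buffers).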
if not independent:
assignments = list()
buffer_index = tf.gather(params=self.buffer_index, indices=parallel)
if self.circular_buffer:
buffer_index = tf.math.mod(x=buffer_index, y=self.buffer_capacity)
indices = tf.stack(values=(parallel, buffer_index), axis=1)
for name, buffer, state in self.states_buffer.zip_items(states):
value = tf.tensor_scatter_nd_update(tensor=buffer, indices=indices, updates=state)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=state))
for name, buffer, internal in self.internals_buffer.zip_items(internals): # not next_*
value = tf.tensor_scatter_nd_update(
tensor=buffer, indices=indices, updates=internal
)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=internal))
for name, buffer, auxiliary in self.auxiliaries_buffer.zip_items(auxiliaries):
value = tf.tensor_scatter_nd_update(
tensor=buffer, indices=indices, updates=auxiliary
)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=auxiliary))
for name, buffer, action in self.actions_buffer.zip_items(actions):
value = tf.tensor_scatter_nd_update(tensor=buffer, indices=indices, updates=action)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=action))
# Increment buffer index (after buffer assignments)
with tf.control_dependencies(control_inputs=assignments):
ones = tf_util.ones(shape=tf.expand_dims(input=batch_size, axis=0), dtype='int')
indices = tf.expand_dims(input=parallel, axis=1)
value = tf.tensor_scatter_nd_add(
tensor=self.buffer_index, indices=indices, updates=ones
)
dependencies.append(self.buffer_index.assign(value=value))
# sparse_delta = tf.IndexedSlices(values=ones, indices=parallel)
# dependencies.append(self.buffer_index.scatter_add(sparse_delta=sparse_delta))
with tf.control_dependencies(control_inputs=dependencies):
actions = actions.fmap(
function=(lambda name, x: tf_util.identity(input=x, name=name)), with_names=True
)
next_internals = next_internals.fmap(
function=(lambda name, x: tf_util.identity(input=x, name=name)), with_names=True
)
return actions, next_internals
@tf_function(num_args=3)
def core_observe(self, *, terminal, reward, parallel):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
buffer_index = tf.gather(params=self.buffer_index, indices=parallel)
batch_size = tf_util.cast(x=tf.shape(input=terminal)[0], dtype='int')
expanded_parallel = tf.expand_dims(input=tf.expand_dims(input=parallel, axis=0), axis=1)
if self.circular_buffer:
buffer_start = tf.gather(params=self.buffer_start, indices=parallel)
# Assertion: size of terminal equals number of buffered timesteps
assertions = list()
# if self.config.create_tf_assertions:
# if self.circular_buffer:
# maybe_one = tf.minimum(x=buffer_index, y=self.reward_horizon.value())
# assertions.append(tf.debugging.assert_equal(
# x=batch_size, y=(buffer_index - buffer_start - maybe_one),
# message="Agent.observe: number of observe-timesteps has to be equal to number "
# "of buffered act-timesteps."
# ))
# else:
# assertions.append(tf.debugging.assert_equal(
# x=batch_size, y=buffer_index,
# message="Agent.observe: number of observe-timesteps has to be equal to number "
# "of buffered act-timesteps."
# ))
if self.config.buffer_observe == 'episode':
# Observe inputs are always buffered in agent until episode is terminated
# --> Call core_experience directly, no need for terminal/reward buffers
def fn_nonterminal():
# Should not be called
return tf.debugging.assert_equal(x=batch_size, y=zero)
def fn_terminal():
# Gather values from buffers, and episode experience
function = (lambda x: x[parallel, :buffer_index])
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
return self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
elif self.reward_horizon == 'episode' or self.parallel_interactions > 1:
# Observe inputs need to be buffered until episode is terminated
# --> Call core_experience if terminal, otherwise buffer terminal/reward
batch_parallel = tf.fill(dims=(batch_size,), value=parallel)
def fn_nonterminal():
# Update terminal/reward buffers
assignments = list()
indices = tf.range(start=(buffer_index - batch_size), limit=buffer_index)
indices = tf.stack(values=(batch_parallel, indices), axis=1)
value = tf.tensor_scatter_nd_update(
tensor=self.terminal_buffer, indices=indices, updates=terminal
)
assignments.append(self.terminal_buffer.assign(value=value))
value = tf.tensor_scatter_nd_update(
tensor=self.reward_buffer, indices=indices, updates=reward
)
assignments.append(self.reward_buffer.assign(value=value))
return tf.group(assignments)
def fn_terminal():
# Gather values from buffers, and episode experience
function = (lambda x: x[parallel, :buffer_index])
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
episode_terminal = self.terminal_buffer[parallel, :buffer_index - batch_size]
episode_reward = self.reward_buffer[parallel, :buffer_index - batch_size]
episode_terminal = tf.concat(values=(episode_terminal, terminal), axis=0)
episode_reward = tf.concat(values=(episode_reward, reward), axis=0)
return self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=episode_terminal, reward=episode_reward
)
else:
# Observe inputs are buffered temporarily and return is computed as soon as possible
# --> Call core_experience if terminal, otherwise enqueue timesteps whose reward horizon is already complete
capacity = tf_util.constant(value=self.buffer_capacity, dtype='int')
reward_horizon = self.reward_horizon.value()
reward_discount = self.reward_discount.value()
batch_parallel = tf.fill(dims=(batch_size,), value=parallel)
def fn_nonterminal():
# Update terminal/reward buffers
assignments = list()
indices = tf.range(start=(buffer_index - batch_size), limit=buffer_index)
indices = tf.math.mod(x=indices, y=capacity)
indices = tf.stack(values=(batch_parallel, indices), axis=1)
value = tf.tensor_scatter_nd_update(
tensor=self.terminal_buffer, indices=indices, updates=terminal
)
assignments.append(self.terminal_buffer.assign(value=value))
value = tf.tensor_scatter_nd_update(
tensor=self.reward_buffer, indices=indices, updates=reward
)
assignments.append(self.reward_buffer.assign(value=value))
with tf.control_dependencies(control_inputs=assignments):
# Number of completed timesteps to process
num_complete = buffer_index - buffer_start - reward_horizon
def true_fn():
return self._nonterminal_experience(
parallel=parallel, buffer_start=buffer_start, buffer_index=buffer_index,
reward_horizon=reward_horizon, num_complete=num_complete,
reward_discount=reward_discount
)
return tf.cond(pred=(num_complete > zero), true_fn=true_fn, false_fn=tf.no_op)
def fn_terminal():
# Gather values from buffers
indices = tf.range(start=buffer_start, limit=buffer_index)
indices = tf.math.mod(x=indices, y=capacity)
function = (lambda x: tf.gather(params=x[parallel], indices=indices))
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
indices = tf.range(buffer_start, buffer_index - batch_size)
indices = tf.math.mod(x=indices, y=capacity)
episode_terminal = tf.gather(params=self.terminal_buffer[parallel], indices=indices)
episode_reward = tf.gather(params=self.reward_buffer[parallel], indices=indices)
episode_terminal = tf.concat(values=(episode_terminal, terminal), axis=0)
episode_reward = tf.concat(values=(episode_reward, reward), axis=0)
# Episode experience
experienced = self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=episode_terminal, reward=episode_reward
)
# Reset buffer start index
with tf.control_dependencies(control_inputs=(indices,)):
zeros = tf_util.zeros(shape=(1,), dtype='int')
value = tf.tensor_scatter_nd_update(
tensor=self.buffer_start, indices=expanded_parallel, updates=zeros
)
assignment = self.buffer_start.assign(value=value)
# sparse_delta = tf.IndexedSlices(values=zero, indices=parallel)
# assignment = self.buffer_start.scatter_update(sparse_delta=sparse_delta)
return tf.group((experienced, assignment))
def fn_terminal_continuation():
# Call the terminal function defined in the appropriate branch above
operations = [fn_terminal()]
# Reset buffer index
with tf.control_dependencies(control_inputs=operations):
updates = tf_util.zeros(shape=(1,), dtype='int')
indices = tf.expand_dims(input=tf.expand_dims(input=parallel, axis=0), axis=1)
value = tf.tensor_scatter_nd_update(
tensor=self.buffer_index, indices=indices, updates=updates
)
operations.append(self.buffer_index.assign(value=value))
# sparse_delta = tf.IndexedSlices(values=zero, indices=parallel)
# operations.append(self.buffer_index.scatter_update(sparse_delta=sparse_delta))
# Preprocessed episode reward summaries (before preprocessed episode reward reset)
if self.reward_processing is not None:
dependencies = list()
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.gather(params=self.preprocessed_episode_return, indices=parallel)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='preprocessed-episode-return', data=x, step=self.episodes
))
dependencies.extend(self.track(
label='reward', name='preprocessed-episode-return', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
# Reset preprocessed episode reward
with tf.control_dependencies(control_inputs=dependencies):
zeros = tf_util.zeros(shape=(1,), dtype='float')
value = tf.tensor_scatter_nd_update(
tensor=self.preprocessed_episode_return, indices=expanded_parallel,
updates=zeros
)
operations.append(self.preprocessed_episode_return.assign(value=value))
# zero_float = tf_util.constant(value=0.0, dtype='float')
# sparse_delta = tf.IndexedSlices(values=zero_float, indices=parallel)
# operations.append(
# self.preprocessed_episode_return.scatter_update(sparse_delta=sparse_delta)
# )
# Reset preprocessors
for preprocessor in self.state_preprocessing.values():
operations.append(preprocessor.reset())
if self.reward_processing is not None:
operations.append(self.reward_processing.reset())
return tf.group(*operations)
# Reward preprocessing
dependencies = assertions
if self.reward_processing is not None:
with tf.control_dependencies(control_inputs=dependencies):
dependencies = list()
true = tf_util.constant(value=True, dtype='bool')
reward = self.reward_processing.apply(
x=reward, deterministic=true, independent=False
)
# Preprocessed reward summary
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='preprocessed-reward', data=x, step=self.timesteps
))
dependencies.extend(self.track(
label='reward', name='preprocessed-reward', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
# Update preprocessed episode reward
sum_reward = tf.math.reduce_sum(input_tensor=reward, keepdims=True)
value = tf.tensor_scatter_nd_add(
tensor=self.preprocessed_episode_return, indices=expanded_parallel,
updates=sum_reward
)
dependencies.append(self.preprocessed_episode_return.assign(value=value))
# sum_reward = tf.math.reduce_sum(input_tensor=reward)
# sparse_delta = tf.IndexedSlices(values=sum_reward, indices=parallel)
# dependencies.append(
# self.preprocessed_episode_return.scatter_add(sparse_delta=sparse_delta)
# )
# Handle terminal vs non-terminal (after preprocessed episode reward)
with tf.control_dependencies(control_inputs=dependencies):
is_terminal = tf.concat(values=([zero], terminal), axis=0)[-1] > zero
experienced = tf.cond(
pred=is_terminal, true_fn=fn_terminal_continuation, false_fn=fn_nonterminal
)
# Handle periodic update
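# An update is triggered once the update counter (timesteps or episodes), measured
# from the start threshold computed below, has advanced by at least `frequency`
# since the last update.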
with tf.control_dependencies(control_inputs=(experienced,)):
if self.update_frequency is None:
updated = tf_util.constant(value=False, dtype='bool')
else:
frequency = self.update_frequency.value()
start = self.update_start.value()
if self.update_unit == 'timesteps':
# Timestep-based batch
past_horizon = tf.math.maximum(
x=self.policy.past_horizon(on_policy=False),
y=self.baseline.past_horizon(on_policy=False)
)
unit = self.timesteps
start = tf.math.maximum(x=start, y=(frequency + past_horizon))
if self.reward_horizon == 'episode':
min_start = tf.where(
condition=(self.episodes > zero), x=start, y=(unit + one)
)
start = tf.math.maximum(x=start, y=min_start)
else:
two = tf_util.constant(value=2, dtype='int')
start = tf.where(
condition=(self.episodes > zero), x=zero,
y=(start + two * self.reward_horizon.value())
)
if self.config.buffer_observe == 'episode':
min_start = tf.where(
condition=(self.episodes > zero), x=start, y=(unit + one)
)
start = tf.math.maximum(x=start, y=min_start)
else:
buffer_observe = tf_util.constant(
value=self.config.buffer_observe, dtype='int'
)
start = tf.math.maximum(x=start, y=buffer_observe)
elif self.update_unit == 'episodes':
# Episode-based batch
start = tf.math.maximum(x=start, y=frequency)
# (Episode counter is only incremented at the end of observe)
unit = self.episodes + tf.where(condition=is_terminal, x=one, y=zero)
unit = unit - start
is_frequency = tf.math.greater_equal(x=unit, y=(self.last_update + frequency))
def perform_update():
assignment = self.last_update.assign(value=unit, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
return self.core_update()
def no_update():
return tf_util.constant(value=False, dtype='bool')
updated = tf.cond(pred=is_frequency, true_fn=perform_update, false_fn=no_update)
with tf.control_dependencies(control_inputs=(updated,)):
return tf_util.identity(input=updated)
def _nonterminal_experience(
self, *, parallel, buffer_start, buffer_index, reward_horizon, num_complete, reward_discount
):
# (similar to _terminal_experience_parallel)
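# Mid-episode processing for the circular buffer: timesteps whose reward horizon is
# already complete are turned into returns (optionally bootstrapped with an early
# baseline prediction), enqueued into memory, and buffer_start is advanced past them.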
one = tf_util.constant(value=1, dtype='int')
capacity = tf_util.constant(value=self.buffer_capacity, dtype='int')
# Whether to predict horizon values now
if self.predict_horizon_values != 'early':
assert self.trace_decay.is_constant(value=1.0)
horizon_values = tf_util.zeros(
shape=tf.expand_dims(input=num_complete, axis=0), dtype='float'
)
else:
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
if self.trace_decay.is_constant(value=1.0):
assertion = tf.debugging.assert_less_equal(
x=baseline_horizon, y=reward_horizon,
message="Baseline on-policy horizon greater than reward estimation horizon "
"currently not supported if prediction_horizon_values = \"early\"."
)
else:
zero = tf_util.constant(value=0, dtype='int')
assertion = tf.debugging.assert_less_equal(
x=baseline_horizon, y=zero,
message="Baseline on-policy horizon currently not supported if "
"trace_decay != 1.0."
)
with tf.control_dependencies(control_inputs=(assertion,)):
# Index range to gather from buffers
if self.trace_decay.is_constant(value=1.0):
# Only indices relevant for horizon values
indices = tf.range(
start=(buffer_start + reward_horizon - baseline_horizon), limit=buffer_index
)
ints_end = num_complete
auxs_start = baseline_horizon
horizons_start = tf.range(num_complete)
horizons_length = tf.fill(dims=(num_complete,), value=(baseline_horizon + one))
else:
# All indices
indices = tf.range(start=(buffer_start + one), limit=buffer_index)
ints_end = None
auxs_start = None
horizons_start = tf.range(buffer_index - buffer_start - one)
horizons_length = tf.ones_like(input=horizons_start)
indices = tf.math.mod(x=indices, y=capacity)
# Return-sequence per timestep, as horizons indexing tensor
horizons = tf.stack(values=(horizons_start, horizons_length), axis=1)
# Gather states
function = (lambda x: tf.gather(params=x[parallel], indices=indices))
states = self.states_buffer.fmap(function=function, cls=TensorDict)
# Gather internals, only for return-sequence start
function = (lambda x: tf.gather(params=x[parallel], indices=indices[:ints_end]))
key = ('baseline' if self.separate_baseline else 'policy')
if len(self.internals_spec[key]) > 0:
internals = self.internals_buffer[key].fmap(function=function, cls=TensorDict)
else:
internals = TensorDict()
# Gather auxiliaries (and actions), only for return-sequence end
function = (lambda x: tf.gather(params=x[parallel], indices=indices[auxs_start:]))
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
# Predict values
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
values = self.baseline.action_value(
states=states, horizons=horizons, internals=internals,
auxiliaries=auxiliaries, actions=actions
)
else:
values = self.baseline.state_value(
states=states, horizons=horizons, internals=internals,
auxiliaries=auxiliaries
)
# Horizon values
if self.trace_decay.is_constant(value=1.0):
horizon_values = values
else:
horizon_values = values[reward_horizon - one:]
# Gather all rewards (incl return-horizon) from buffer
indices = tf.range(start=buffer_start, limit=(buffer_index - one))
indices = tf.math.mod(x=indices, y=capacity)
reward = tf.gather(params=self.reward_buffer[parallel], indices=indices)
# Recursive return
if self.trace_decay.is_constant(value=1.0):
# Discounted cumulative sum
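# Folding right over the horizon offsets computes, for all num_complete timesteps in
# parallel, R_t = r_t + gamma * r_{t+1} + ... + gamma^{H-1} * r_{t+H-1} + gamma^H * V_{t+H},
# where the horizon_values V_{t+H} serve as initializer.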
def recursive_return(next_return, index):
return reward[index: index + num_complete] + reward_discount * next_return
else:
# TD-lambda
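# TD(lambda) return: each step mixes the one-step bootstrap value with the recursive
# return, R_t = r_t + gamma * ((1 - lambda) * V_{t+1} + lambda * R_{t+1}).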
one_float = tf_util.constant(value=1.0, dtype='float')
trace_decay = self.trace_decay.value()
def recursive_return(next_return, index):
next_value = values[index: index + num_complete]
next_return = (one_float - trace_decay) * next_value + trace_decay * next_return
return reward[index: index + num_complete] + reward_discount * next_return
reward = tf.foldr(
fn=recursive_return, elems=tf.range(reward_horizon), initializer=horizon_values
)
# Gather other values of completed timesteps from buffers
indices = tf.range(start=buffer_start, limit=(buffer_start + num_complete))
indices = tf.math.mod(x=indices, y=capacity)
function = (lambda x: tf.gather(params=x[parallel], indices=indices))
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
terminal = function(self.terminal_buffer)
# Store completed timesteps
experienced = self.memory.enqueue(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
# Increment buffer start index
with tf.control_dependencies(control_inputs=(indices,)):
updates = tf.expand_dims(input=num_complete, axis=0)
indices = tf.expand_dims(input=tf.expand_dims(input=parallel, axis=0), axis=1)
value = tf.tensor_scatter_nd_add(
tensor=self.buffer_start, indices=indices, updates=updates
)
assignment = self.buffer_start.assign(value=value)
# sparse_delta = tf.IndexedSlices(values=num_complete, indices=parallel)
# assignment = self.buffer_start.scatter_add(sparse_delta=sparse_delta)
return tf.group((experienced, assignment))
@tf_function(num_args=6)
def core_experience(self, *, states, internals, auxiliaries, actions, terminal, reward):
episode_length = tf_util.cast(x=tf.shape(input=terminal)[0], dtype='int')
reward_discount = self.reward_discount.value()
if self.reward_horizon == 'episode':
# Reward horizon is entire episode
reward = self._terminal_experience_iterative(
episode_length=episode_length, reward_discount=reward_discount, states=states,
internals=internals, auxiliaries=auxiliaries, actions=actions, reward=reward,
terminal=terminal
)
else:
# Optimize required loop iterations: process the remaining timesteps either
# - iteratively, if remaining episode length is at most reward horizon
# - in parallel, if reward horizon is less than remaining episode length
reward_horizon = self.reward_horizon.value()
def true_fn():
return self._terminal_experience_iterative(
episode_length=episode_length, reward_discount=reward_discount, states=states,
internals=internals, auxiliaries=auxiliaries, actions=actions, reward=reward,
terminal=terminal
)
def false_fn():
return self._terminal_experience_parallel(
episode_length=episode_length, reward_horizon=reward_horizon,
reward_discount=reward_discount, states=states, internals=internals,
auxiliaries=auxiliaries, actions=actions, reward=reward, terminal=terminal
)
reward = tf.cond(
pred=(episode_length <= reward_horizon), true_fn=true_fn, false_fn=false_fn
)
# Store episode
return self.memory.enqueue(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
def _terminal_experience_iterative(
self, *, episode_length, reward_discount,
states, internals, auxiliaries, actions, reward, terminal
):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
zero_float = tf_util.constant(value=0.0, dtype='float')
internals = (internals['baseline'] if self.separate_baseline else internals['policy'])
if self.trace_decay.is_constant(value=1.0):
# Whether to predict horizon/terminal values now
if self.predict_horizon_values != 'early':
# Whether to predict all or only abort-terminals
# (-reward[-1] since terminal state value will be predicted)
terminal_value = -reward[-1] / reward_discount
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=zero_float, y=terminal_value)
else:
def predict_terminal_value():
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
baseline_horizon = tf.math.minimum(x=baseline_horizon, y=episode_length)
# Single-step horizon
horizon_start = episode_length - one - baseline_horizon
horizons = tf.expand_dims(
input=tf.stack(values=(zero, baseline_horizon + one)), axis=0
)
# Predict values
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
# Use given actions since early estimate
# if self.separate_baseline:
# policy_horizon = self.policy.past_horizon(on_policy=True)
# policy_horizon = tf.math.minimum(x=policy_horizon, y=episode_length)
# policy_horizon_start = terminal_index - policy_horizon
# else:
# policy_horizon_start = past_horizon_start
# deterministic = tf_util.constant(value=True, dtype='bool')
# _actions, _ = self.policy.act(
# states=states[policy_horizon_start:], horizons=horizons[:maybe_one],
# internals=internals['policy'][policy_horizon_start: policy_horizon_start + maybe_one],
# auxiliaries=auxiliaries[terminal_index:], deterministic=deterministic,
# independent=True
# )
terminal_value = self.baseline.action_value(
states=states[horizon_start:], horizons=horizons,
internals=internals[horizon_start: horizon_start + one],
auxiliaries=auxiliaries[-1:],
actions=actions[-1:]
)
else:
terminal_value = self.baseline.state_value(
states=states[horizon_start:], horizons=horizons,
internals=internals[horizon_start: horizon_start + one],
auxiliaries=auxiliaries[-1:]
)
# Modification to correct for use as initializer in tf.scan
# (-reward[-1] since terminal state value will be predicted)
return (terminal_value[0] - reward[-1]) / reward_discount
# Whether to predict all or only abort-terminals
if self.predict_terminal_values:
terminal_value = predict_terminal_value()
else:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.cond(
pred=is_terminal, true_fn=(lambda: zero_float),
false_fn=predict_terminal_value
)
# Discounted cumulative sum return
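# Reverse scan over the episode rewards with the (corrected) terminal value as
# initializer yields R_t = r_t + gamma * R_{t+1} for every timestep.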
def recursive_return(next_return, current_reward):
return current_reward + reward_discount * next_return
return tf.scan(
fn=recursive_return, elems=reward, initializer=terminal_value, reverse=True
)
else:
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
assertion = tf.debugging.assert_equal(
x=baseline_horizon, y=zero,
message="Baseline cannot have on-policy horizon if trace_decay != 1.0."
)
with tf.control_dependencies(control_inputs=(assertion,)):
# Baseline-horizon-sequence per timestep, as horizons indexing tensor
horizons_start = tf.range(episode_length - one)
horizons_length = tf.fill(dims=(episode_length - one,), value=one)
horizons = tf.stack(values=(horizons_start, horizons_length), axis=1)
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
values = self.baseline.action_value(
states=states[1:], horizons=horizons, internals=internals[1:],
auxiliaries=auxiliaries[1:], actions=actions[1:]
)
else:
values = self.baseline.state_value(
states=states[1:], horizons=horizons, internals=internals[1:],
auxiliaries=auxiliaries[1:]
)
# Modification to correct for use as initializer in tf.scan
# (-reward[-1] since terminal state value will be predicted)
terminal_value = (values[-1] - reward[-1]) / reward_discount
# Whether to predict all or only abort-terminals
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=zero_float, y=terminal_value)
values = tf.concat(values=(values, [terminal_value]), axis=0)
# TD-lambda return
one_float = tf_util.constant(value=1.0, dtype='float')
trace_decay = self.trace_decay.value()
def recursive_return(next_return, reward_value):
current_reward, next_value = reward_value
next_return = (one_float - trace_decay) * next_value + trace_decay * next_return
return current_reward + reward_discount * next_return
return tf.scan(
fn=recursive_return, elems=(reward, values), initializer=terminal_value,
reverse=True
)
def _terminal_experience_parallel(
self, *, episode_length, reward_horizon, reward_discount,
states, internals, auxiliaries, actions, reward, terminal
):
# (similar to _nonterminal_experience)
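# Episode-end processing when the episode is longer than the reward horizon: instead
# of scanning step by step, the discounted-return recursion is vectorized across all
# episode timesteps by folding over the horizon offsets only.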
one = tf_util.constant(value=1, dtype='int')
internals = (internals['baseline'] if self.separate_baseline else internals['policy'])
# Whether to predict horizon values now
if self.predict_horizon_values != 'early':
assert self.trace_decay.is_constant(value=1.0)
# Whether to predict all or only abort-terminals
terminal_value = tf_util.constant(value=0.0, dtype='float')
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=reward[-1], y=terminal_value)
# Horizon-expanded rewards and values
horizon_values = tf_util.zeros(
shape=tf.expand_dims(input=episode_length, axis=0), dtype='float'
)
reward = tf.concat(
values=(reward[:-1], [terminal_value], horizon_values[:reward_horizon]), axis=0
)
else:
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
assertions = list() # (control dependency below, before baseline call)
if not self.trace_decay.is_constant(value=1.0):
zero = tf_util.constant(value=0, dtype='int')
assertions.append(tf.debugging.assert_equal(
x=baseline_horizon, y=zero,
message="Baseline cannot have on-policy horizon if trace_decay != 1.0."
))
# Index starts/ends
if self.trace_decay.is_constant(value=1.0):
# Only indices relevant for horizon values
reward_horizon_start = reward_horizon
zero = tf_util.constant(value=0, dtype='int')
baseline_horizon_start = tf.maximum(
x=(reward_horizon_start - baseline_horizon), y=zero
)
baseline_horizon_end = episode_length - baseline_horizon
baseline_horizon_end = tf.maximum(x=baseline_horizon_end, y=baseline_horizon_start)
horizons_start = tf.range(baseline_horizon_end - baseline_horizon_start)
horizons_length = reward_horizon_start + horizons_start
horizons_length = tf.math.minimum(x=horizons_length, y=(baseline_horizon + one))
else:
# All indices
reward_horizon_start = 1
baseline_horizon_start = 1
baseline_horizon_end = None
horizons_start = tf.range(episode_length - one)
horizons_length = tf.ones_like(input=horizons_start)
# Baseline-horizon-sequence per timestep, as horizons indexing tensor
horizons = tf.stack(values=(horizons_start, horizons_length), axis=1)
# Predict values
with tf.control_dependencies(control_inputs=assertions):
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
values = self.baseline.action_value(
states=states[baseline_horizon_start:],
horizons=horizons,
internals=internals[baseline_horizon_start: baseline_horizon_end],
auxiliaries=auxiliaries[reward_horizon_start:],
actions=actions[reward_horizon_start:]
)
else:
values = self.baseline.state_value(
states=states[baseline_horizon_start:],
horizons=horizons,
internals=internals[baseline_horizon_start: baseline_horizon_end],
auxiliaries=auxiliaries[reward_horizon_start:]
)
# Whether to predict all or only abort-terminals
terminal_value = values[-1]
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=reward[-1], y=terminal_value)
# Horizon-expanded rewards and values
zeros_reward_horizon = tf_util.zeros(
shape=tf.expand_dims(input=(reward_horizon - one), axis=0), dtype='float'
)
reward = tf.concat(values=(reward[:-1], [terminal_value], zeros_reward_horizon), axis=0)
zeros_reward_horizon = tf_util.zeros(
shape=tf.expand_dims(reward_horizon, axis=0), dtype='float'
)
values = tf.concat(values=(values, zeros_reward_horizon), axis=0)
# Horizon values
if self.trace_decay.is_constant(value=1.0):
horizon_values = values
else:
horizon_values = values[reward_horizon - one:]
# Recursive return
if self.trace_decay.is_constant(value=1.0):
# Discounted cumulative sum
def recursive_return(next_return, index):
return reward[index: index + episode_length] + reward_discount * next_return
else:
# TD-lambda
one_float = tf_util.constant(value=1.0, dtype='float')
trace_decay = self.trace_decay.value()
def recursive_return(next_return, index):
next_value = values[index: index + episode_length]
next_return = (one_float - trace_decay) * next_value + trace_decay * next_return
return reward[index: index + episode_length] + reward_discount * next_return
return tf.foldr(
fn=recursive_return, elems=tf.range(reward_horizon), initializer=horizon_values
)
@tf_function(num_args=0)
def core_update(self):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
true = tf_util.constant(value=True, dtype='bool')
# Retrieve batch
batch_size = self.update_batch_size.value()
if self.update_unit == 'timesteps':
# Timestep-based batch
# Dependency horizon
past_horizon = tf.math.maximum(
x=self.policy.past_horizon(on_policy=False),
y=self.baseline.past_horizon(on_policy=False)
)
if self.predict_horizon_values != 'late':
future_horizon = zero
elif self.reward_horizon == 'episode':
future_horizon = tf_util.constant(value=self.max_episode_timesteps, dtype='int')
else:
future_horizon = self.reward_horizon.value()
indices = self.memory.retrieve_timesteps(
n=batch_size, past_horizon=past_horizon, future_horizon=future_horizon
)
elif self.update_unit == 'episodes':
# Episode-based batch
indices = self.memory.retrieve_episodes(n=batch_size)
# Retrieve states and internals
policy_horizon = self.policy.past_horizon(on_policy=False)
if self.separate_baseline and self.baseline_optimizer is None:
assertions = list()
if self.config.create_tf_assertions:
assertions.append(tf.debugging.assert_equal(
x=policy_horizon, y=self.baseline.past_horizon(on_policy=False),
message="Policy and baseline cannot depend on a different number of previous "
"states if baseline_optimizer is None."
))
with tf.control_dependencies(control_inputs=assertions):
policy_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=('internals',)
)
baseline_horizons = policy_horizons
baseline_states = policy_states = sequence_values['states']
internals = policy_internals = initial_values['internals']
if self.separate_baseline:
baseline_internals = policy_internals['baseline']
else:
baseline_internals = policy_internals
else:
if self.baseline_optimizer is None:
policy_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=('internals',)
)
policy_states = sequence_values['states']
internals = policy_internals = initial_values['internals']
elif len(self.internals_spec['policy']) > 0:
policy_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=('internals/policy',)
)
policy_states = sequence_values['states']
internals = initial_values['internals']
policy_internals = initial_values['internals/policy']
else:
policy_horizons, sequence_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=()
)
policy_states = sequence_values['states']
internals = policy_internals = TensorDict()
# Retrieve baseline states and internals
baseline_horizon = self.baseline.past_horizon(on_policy=False)
if self.separate_baseline:
if len(self.internals_spec['baseline']) > 0:
baseline_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=('internals/baseline',)
)
baseline_states = sequence_values['states']
internals = initial_values['internals']
baseline_internals = initial_values['internals/baseline']
else:
baseline_horizons, sequence_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=()
)
baseline_states = sequence_values['states']
internals = baseline_internals = TensorDict()
else:
if len(self.internals_spec['policy']) > 0:
baseline_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=('internals/policy',)
)
baseline_states = sequence_values['states']
internals = initial_values['internals']
baseline_internals = initial_values['internals/policy']
else:
baseline_horizons, sequence_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=()
)
baseline_states = sequence_values['states']
internals = baseline_internals = TensorDict()
# Retrieve auxiliaries, actions, reward
if self.gae_decay.is_constant(value=0.0):
values = self.memory.retrieve(
indices=indices, values=('auxiliaries', 'actions', 'reward')
)
else:
values = self.memory.retrieve(
indices=indices, values=('auxiliaries', 'actions', 'reward', 'terminal')
)
terminal = values['terminal']
auxiliaries = values['auxiliaries']
actions = values['actions']
reward = values['reward']
# Return estimation
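# With predict_horizon_values == 'late', the returns stored in memory are still
# truncated at the reward horizon; they are completed here at update time using
# baseline predictions at the end of each horizon.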
if self.predict_horizon_values == 'late':
reward = self._complete_horizon_values(
indices=indices, internals=internals, reward=reward
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-return', data=x, step=self.updates
))
dependencies.extend(self.track(label='reward', name='update-return', data=x))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if self.return_processing is not None:
with tf.control_dependencies(control_inputs=dependencies):
reward = self.return_processing.apply(
x=reward, deterministic=true, independent=False
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-processed-return', data=x, step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-processed-return', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
baseline_arguments = TensorDict(
states=baseline_states, horizons=baseline_horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions, reward=reward
)
if self.baseline_objective is not None:
baseline_arguments['reference'] = self.baseline_objective.reference(
states=baseline_states, horizons=baseline_horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions, policy=self.baseline
)
if self.baseline_optimizer is not None and self.estimate_advantage != 'early':
def fn_kl_divergence(
*, states, horizons, internals, auxiliaries, actions, reward, reference
):
reference = self.baseline.kldiv_reference(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
return self.baseline.kl_divergence(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
reference=reference
)
variables = tuple(self.baseline.trainable_variables)
kwargs = dict()
try:
ordered_names = [variable.name for variable in variables]
kwargs['source_variables'] = tuple(sorted(
self.policy.trainable_variables,
key=(lambda x: ordered_names.index(x.name.replace('/policy/', '/baseline/')))
))
except ValueError:
pass
dependencies.extend(baseline_arguments.flatten())
# Optimization
with tf.control_dependencies(control_inputs=dependencies):
optimized = self.baseline_optimizer.update(
arguments=baseline_arguments, variables=variables, fn_loss=self.baseline_loss,
fn_kl_divergence=fn_kl_divergence, **kwargs
)
dependencies = [optimized]
with tf.control_dependencies(control_inputs=dependencies):
if self.estimate_advantage is not False and not self.advantage_in_loss:
if self.predict_action_values:
# Use past actions since advantage R(s,a) - Q(s,a)
baseline_prediction = self.baseline.action_value(
states=baseline_states, horizons=baseline_horizons,
internals=baseline_internals, auxiliaries=auxiliaries, actions=actions
)
else:
baseline_prediction = self.baseline.state_value(
states=baseline_states, horizons=baseline_horizons,
internals=baseline_internals, auxiliaries=auxiliaries
)
reward = reward - baseline_prediction
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-advantage', data=x, step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-advantage', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if self.advantage_processing is not None:
with tf.control_dependencies(control_inputs=dependencies):
reward = self.advantage_processing.apply(
x=reward, deterministic=true, independent=False
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-processed-advantage', data=x,
step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-processed-advantage', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if not self.gae_decay.is_constant(value=0.0):
with tf.control_dependencies(control_inputs=dependencies):
# Requires consistent batch!!!
zero_float = tf_util.constant(value=0.0, dtype='float')
reward_discount = self.reward_discount.value()
gae_decay = self.gae_decay.value()
# Discounted cumulative sum
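# Generalized advantage estimation: a reverse scan computes
# A_t = adv_t + gamma * gae_decay * A_{t+1}, with the recursion reset at terminal
# timesteps, where adv_t is the (processed) advantage computed above.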
def recursive_gae(next_gae, advantage_terminal):
current_advantage, current_terminal = advantage_terminal
next_gae = tf.where(
condition=(current_terminal == zero), x=next_gae, y=zero_float
)
return current_advantage + reward_discount * gae_decay * next_gae
reward = tf.scan(
fn=recursive_gae, elems=(reward, terminal), initializer=zero_float,
reverse=True
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-gae', data=x, step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-gae', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if self.baseline_optimizer is None:
policy_only_internals = policy_internals['policy']
else:
policy_only_internals = policy_internals
reference = self.objective.reference(
states=policy_states, horizons=policy_horizons, internals=policy_only_internals,
auxiliaries=auxiliaries, actions=actions, policy=self.policy
)
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
reference = TensorDict(policy=reference, baseline=baseline_arguments['reference'])
policy_arguments = TensorDict(
states=policy_states, horizons=policy_horizons, internals=policy_internals,
auxiliaries=auxiliaries, actions=actions, reward=reward, reference=reference
)
if self.estimate_advantage is not False and self.advantage_in_loss:
variables = tuple(self.trainable_variables)
def fn_loss(*, states, horizons, internals, auxiliaries, actions, reward, reference):
assertions = list()
if self.config.create_tf_assertions:
past_horizon = self.baseline.past_horizon(on_policy=False)
# TODO: remove restriction
assertions.append(tf.debugging.assert_less_equal(
x=(horizons[:, 1] - one), y=past_horizon,
message="Baseline horizon cannot be greater than policy horizon."
))
with tf.control_dependencies(control_inputs=assertions):
if self.predict_action_values:
# Use past actions since advantage R(s,a) - Q(s,a)
baseline_prediction = self.baseline.action_value(
states=states, horizons=horizons, internals=internals['baseline'],
auxiliaries=auxiliaries, actions=actions
)
else:
baseline_prediction = self.baseline.state_value(
states=states, horizons=horizons, internals=internals['baseline'],
auxiliaries=auxiliaries
)
reward = reward - baseline_prediction
def fn_summary1():
return tf.math.reduce_mean(input_tensor=reward, axis=0)
dependencies = self.summary(
label='reward', name='update-advantage', data=fn_summary1, step='updates'
)
dependencies.extend(self.track(
label='reward', name='update-advantage', data=fn_summary1
))
if self.advantage_processing is not None:
with tf.control_dependencies(control_inputs=dependencies):
reward = self.advantage_processing.apply(
x=reward, deterministic=true, independent=False
)
def fn_summary2():
return tf.math.reduce_mean(input_tensor=reward, axis=0)
dependencies = self.summary(
label='reward', name='update-processed-advantage',
data=fn_summary2, step='updates'
)
dependencies.extend(self.track(
label='reward', name='update-processed-advantage', data=fn_summary2
))
with tf.control_dependencies(control_inputs=dependencies):
return self.loss(
states=states, horizons=horizons, internals=internals,
auxiliaries=auxiliaries, actions=actions, reward=reward, reference=reference
)
else:
variables = tuple(self.policy.trainable_variables)
fn_loss = self.loss
def fn_kl_divergence(
*, states, horizons, internals, auxiliaries, actions, reward, reference
):
if self.baseline_optimizer is None:
internals = internals['policy']
# TODO: Policy require
reference = self.policy.kldiv_reference(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
return self.policy.kl_divergence(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
reference=reference
)
kwargs = dict()
if self.separate_baseline:
try:
ordered_names = [variable.name for variable in variables]
kwargs['source_variables'] = tuple(sorted(
self.baseline.trainable_variables,
key=(lambda x: ordered_names.index(x.name.replace('/baseline/', '/policy/')))
))
except ValueError:
pass
# if self.global_model is not None:
# assert 'global_variables' not in kwargs
# kwargs['global_variables'] = tuple(self.global_model.trainable_variables)
dependencies.extend(policy_arguments.flatten())
# Hack: KL divergence summary: reference before update
if isinstance(self.policy, StochasticPolicy) and (
self.summaries == 'all' or 'kl-divergence' in self.summaries or
self.tracking == 'all' or 'kl-divergence' in self.tracking
):
kldiv_reference = self.policy.kldiv_reference(
states=policy_states, horizons=policy_horizons, internals=policy_only_internals,
auxiliaries=auxiliaries
)
dependencies.extend(kldiv_reference.flatten())
# Optimization
with tf.control_dependencies(control_inputs=dependencies):
optimized = self.optimizer.update(
arguments=policy_arguments, variables=variables, fn_loss=fn_loss,
fn_kl_divergence=fn_kl_divergence, **kwargs
)
dependencies = [optimized]
if self.baseline_optimizer is not None and self.estimate_advantage == 'early':
def fn_kl_divergence(
*, states, horizons, internals, auxiliaries, actions, reward, reference
):
reference = self.baseline.kldiv_reference(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
return self.baseline.kl_divergence(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
reference=reference
)
variables = tuple(self.baseline.trainable_variables)
kwargs = dict()
try:
ordered_names = [variable.name for variable in variables]
kwargs['source_variables'] = tuple(sorted(
self.policy.trainable_variables,
key=(lambda x: ordered_names.index(x.name.replace('/policy/', '/baseline/')))
))
except ValueError:
pass
dependencies.extend(baseline_arguments.flatten())
# Optimization
with tf.control_dependencies(control_inputs=dependencies):
optimized = self.baseline_optimizer.update(
arguments=baseline_arguments, variables=variables, fn_loss=self.baseline_loss,
fn_kl_divergence=fn_kl_divergence, **kwargs
)
dependencies = [optimized]
# Update summaries
with tf.control_dependencies(control_inputs=dependencies):
dependencies = list()
# Entropy summaries
if isinstance(self.policy, StochasticPolicy) and (
self.summaries == 'all' or 'entropy' in self.summaries or
self.tracking == 'all' or 'entropy' in self.tracking
):
if self.summaries == 'all' or 'entropy' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
if len(self.actions_spec) > 1:
entropies = self.policy.entropies(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries
)
for name, spec in self.actions_spec.items():
entropies[name] = tf.reshape(tensor=entropies[name], shape=(-1,))
entropy = tf.math.reduce_mean(input_tensor=entropies[name], axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name=('entropies/' + name), data=entropy, step=self.updates
))
dependencies.extend(self.track(
label='entropy', name=('entropies/' + name), data=entropy
))
entropy = tf.concat(values=tuple(entropies.values()), axis=0)
else:
entropy = self.policy.entropy(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries
)
entropy = tf.math.reduce_mean(input_tensor=entropy, axis=0)
if summarizer is not None:
dependencies.append(
tf.summary.scalar(name='entropy', data=entropy, step=self.updates)
)
dependencies.extend(self.track(label='entropy', name='entropy', data=entropy))
if summarizer is not None:
summarizer.__exit__(None, None, None)
# KL divergence summaries
if isinstance(self.policy, StochasticPolicy) and (
self.summaries == 'all' or 'kl-divergence' in self.summaries or
self.tracking == 'all' or 'kl-divergence' in self.tracking
):
if self.summaries == 'all' or 'kl-divergence' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
if len(self.actions_spec) > 1:
kl_divs = self.policy.kl_divergences(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries,
reference=kldiv_reference
)
for name, spec in self.actions_spec.items():
kl_divs[name] = tf.reshape(tensor=kl_divs[name], shape=(-1,))
kl_div = tf.math.reduce_mean(input_tensor=kl_divs[name], axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name=('kl-divergences/' + name), data=kl_div, step=self.updates
))
dependencies.extend(self.track(
label='kl-divergence', name=('kl-divergences/' + name), data=kl_div
))
kl_div = tf.concat(values=tuple(kl_divs.values()), axis=0)
else:
kl_div = self.policy.kl_divergence(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries,
reference=kldiv_reference
)
kl_div = tf.math.reduce_mean(input_tensor=kl_div, axis=0)
if summarizer is not None:
dependencies.append(
tf.summary.scalar(name='kl-divergence', data=kl_div, step=self.updates)
)
dependencies.extend(
self.track(label='kl-divergence', name='kl-divergence', data=kl_div)
)
if summarizer is not None:
summarizer.__exit__(None, None, None)
# Increment update
with tf.control_dependencies(control_inputs=dependencies):
assignment = self.updates.assign_add(delta=one, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
dependencies = list()
# Variables summaries
if self.summaries == 'all' or 'variables' in self.summaries:
with self.summarizer.as_default():
for variable in self.trainable_variables:
name = variable.name
assert name[-2] == ':'
if name.startswith(self.name + '/'):
name = 'variables/' + name[len(self.name) + 1: -2]
else:
name = name[:-2]
x = tf.math.reduce_mean(input_tensor=variable)
dependencies.append(tf.summary.scalar(name=name, data=x, step=self.updates))
with tf.control_dependencies(control_inputs=dependencies):
return tf_util.identity(input=optimized)
def _complete_horizon_values(self, indices, internals, reward):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
true = tf_util.constant(value=True, dtype='bool')
reward_horizon = self.reward_horizon.value()
reward_discount = self.reward_discount.value()
# TODO: no need for memory if update episode-based (or not random replay?)
# Internal values to retrieve, depending on different internals configurations
baseline_internals_values = 'internals/baseline'
if self.predict_action_values and self.separate_baseline:
internals_values = 'internals'
elif self.separate_baseline:
if len(self.internals_spec['baseline']) > 0:
internals_values = 'internals/baseline'
else:
internals_values = None
else:
if len(self.internals_spec['policy']) > 0:
internals_values = 'internals/policy'
baseline_internals_values = 'internals/policy'
else:
internals_values = None
if self.baseline.max_past_horizon(on_policy=False) == 0:
# Horizons indexing tensor
batch_size = tf_util.cast(x=tf.shape(input=indices)[0], dtype='int')
starts = tf.range(batch_size)
lengths = tf.ones_like(input=indices)
horizons = tf.stack(values=(starts, lengths), axis=1)
# TODO: remove restriction
if self.predict_action_values and self.separate_baseline:
assert self.policy.max_past_horizon(on_policy=False) == 0
# Retrieve horizon values from memory
values = ('states', 'auxiliaries', 'terminal')
if internals_values is not None:
values += (internals_values,)
offsets, values = self.memory.successors(
indices=indices, horizon=reward_horizon, sequence_values=(), final_values=values
)
states = values['states']
policy_internals = values.get('internals/policy')
baseline_internals = values.get(baseline_internals_values, TensorDict())
auxiliaries = values['auxiliaries']
terminal = values['terminal']
# -1 since successors length >= 1
offsets = offsets - one
else:
baseline_horizon = self.baseline.past_horizon(on_policy=False)
assertions = list()
if self.config.create_tf_assertions and self.predict_action_values:
policy_horizon = self.policy.past_horizon(on_policy=False)
# TODO: remove restriction
assertions.append(tf.debugging.assert_equal(
x=policy_horizon, y=baseline_horizon,
message="Policy and baseline cannot depend on a different number of "
"previous states if predict_action_values is True."
))
with tf.control_dependencies(control_inputs=assertions):
# (Tried to do this more efficiently by differentiating between
# reward horizon >/=/< baseline horizon, but gets too complex since
# it needs to take into account episode start/end edge cases.)
# Retrieve horizon values from memory
offsets, values = self.memory.successors(
indices=indices, horizon=reward_horizon, sequence_values=(),
final_values=('auxiliaries', 'terminal')
)
auxiliaries = values['auxiliaries']
terminal = values['terminal']
# -1 since successors length >= 1
offsets = offsets - one
# Retrieve baseline states sequence and initial internals from memory
if internals_values is None:
horizons, sequence_values = self.memory.predecessors(
indices=(indices + offsets), horizon=baseline_horizon,
sequence_values=('states',), initial_values=()
)
policy_internals = None
baseline_internals = TensorDict()
else:
horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=(baseline_horizon - reward_horizon),
sequence_values=('states',), initial_values=(internals_values,)
)
policy_internals = initial_values.get('internals/policy')
baseline_internals = initial_values.get(baseline_internals_values, TensorDict())
states = sequence_values['states']
# Predict horizon values
if self.predict_action_values:
actions, _ = self.policy.act(
states=states, horizons=horizons, internals=policy_internals,
auxiliaries=auxiliaries, deterministic=true, independent=True
)
horizon_values = self.baseline.action_value(
states=states, horizons=horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions
)
else:
horizon_values = self.baseline.state_value(
states=states, horizons=horizons, internals=baseline_internals,
auxiliaries=auxiliaries
)
# Value horizon assertions
assertions = list()
if self.config.create_tf_assertions:
assertions.append(tf.debugging.assert_greater_equal(x=offsets, y=zero))
if self.baseline.max_past_horizon(on_policy=False) == 0:
baseline_horizon = self.baseline.past_horizon(on_policy=False)
assertions.append(tf.debugging.assert_less_equal(x=offsets, y=reward_horizon))
# Add appropriately discounted horizon values to reward
with tf.control_dependencies(control_inputs=assertions):
# Pow numerically stable since 0.0 <= discount <= 1.0
discounts = tf.math.pow(x=reward_discount, y=tf_util.cast(x=offsets, dtype='float'))
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal, y=one)
zeros = tf.zeros_like(input=discounts)
discounts = tf.where(condition=is_terminal, x=zeros, y=discounts)
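            # Illustrative example (hypothetical numbers, editor's note): with
            # reward_discount = 0.99 and offsets = [3, 1], the horizon values
            # are scaled by [0.99 ** 3, 0.99 ** 1] before being added to the
            # reward; transitions whose successor is terminal use a factor of
            # 0 instead, unless predict_terminal_values is set.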
return reward + discounts * horizon_values
@tf_function(num_args=7)
def loss(self, *, states, horizons, internals, auxiliaries, actions, reward, reference):
if self.baseline_optimizer is None:
policy_internals = internals['policy']
else:
policy_internals = internals
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
policy_reference = reference['policy']
else:
policy_reference = reference
# Loss per instance
loss = self.objective.loss(
states=states, horizons=horizons, internals=policy_internals, auxiliaries=auxiliaries,
actions=actions, reward=reward, reference=policy_reference, policy=self.policy,
baseline=(self.baseline if self.separate_baseline else None)
)
# Objective loss
loss = tf.math.reduce_mean(input_tensor=loss, axis=0)
dependencies = self.summary(
label='loss', name='losses/policy-objective-loss', data=loss, step='updates'
)
dependencies.extend(self.track(label='loss', name='policy-objective-loss', data=loss))
# Regularization losses
regularization_loss = self.regularize(
states=states, horizons=horizons, internals=policy_internals, auxiliaries=auxiliaries
)
dependencies.extend(self.summary(
label='loss', name='losses/policy-regularization-loss', data=regularization_loss,
step='updates'
))
dependencies.extend(
self.track(label='loss', name='policy-regularization-loss', data=regularization_loss)
)
loss += regularization_loss
# Baseline loss
if self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
if self.separate_baseline:
baseline_internals = internals['baseline']
else:
baseline_internals = policy_internals
if self.baseline_objective is not None:
baseline_reference = reference['baseline']
else:
baseline_reference = policy_reference
zero = tf_util.constant(value=0.0, dtype='float')
baseline_loss_weight = self.baseline_loss_weight.value()
def no_baseline_loss():
return zero
def apply_baseline_loss():
baseline_loss = self.baseline_loss(
states=states, horizons=horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions, reward=reward,
reference=baseline_reference
)
return baseline_loss_weight * baseline_loss
loss += tf.cond(
pred=tf.math.equal(x=baseline_loss_weight, y=zero),
true_fn=no_baseline_loss, false_fn=apply_baseline_loss
)
dependencies.extend(self.summary(
label='loss', name='losses/policy-loss', data=loss, step='updates'
))
dependencies.extend(self.track(label='loss', name='policy-loss', data=loss))
with tf.control_dependencies(control_inputs=dependencies):
return tf_util.identity(input=loss)
@tf_function(num_args=4, overwrites_signature=True)
def regularize(self, *, states, horizons, internals, auxiliaries):
regularization_loss = super().regularize()
# Entropy regularization
if not self.entropy_regularization.is_constant(value=0.0):
zero = tf_util.constant(value=0.0, dtype='float')
entropy_regularization = self.entropy_regularization.value()
def no_entropy_regularization():
return zero
def apply_entropy_regularization():
entropy = self.policy.entropy(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
entropy = tf.math.reduce_mean(input_tensor=entropy, axis=0)
return -entropy_regularization * entropy
regularization_loss += tf.cond(
pred=tf.math.equal(x=entropy_regularization, y=zero),
true_fn=no_entropy_regularization, false_fn=apply_entropy_regularization
)
return regularization_loss
@tf_function(num_args=7)
def baseline_loss(
self, *, states, horizons, internals, auxiliaries, actions, reward, reference
):
# Loss per instance
loss = self.baseline_objective.loss(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
actions=actions, reward=reward, reference=reference, policy=self.baseline
)
# Objective loss
loss = tf.math.reduce_mean(input_tensor=loss, axis=0)
dependencies = list()
if self.separate_baseline:
dependencies.extend(self.summary(
label='loss', name='losses/baseline-objective-loss', data=loss, step='updates'
))
dependencies.extend(
self.track(label='loss', name='baseline-objective-loss', data=loss)
)
# Regularization losses
regularization_loss = self.baseline.regularize()
dependencies.extend(self.summary(
label='loss', name='losses/baseline-regularization-loss',
data=regularization_loss, step='updates'
))
dependencies.extend(self.track(
label='loss', name='baseline-regularization-loss', data=regularization_loss
))
loss += regularization_loss
dependencies.extend(self.summary(
label='loss', name='losses/baseline-loss', data=loss, step='updates'
))
dependencies.extend(self.track(label='loss', name='baseline-loss', data=loss))
with tf.control_dependencies(control_inputs=dependencies):
return tf_util.identity(input=loss)
|
a5187b194ba3e0bc87b8e3d92ebd1da62ee3bf5e
|
e910318d01528d82040507a49eeeb8dade45b31f
|
/examples/pins.py
|
3a8472e8a6849db30165fb5e8fc74859a485e3fe
|
[
"MIT"
] |
permissive
|
pfalcon/pycopy
|
e844480a5e5cd463530328889daed2ba87552b8a
|
3ac90ae9c3c6bbebfba9cada2d37025e35c62796
|
refs/heads/pfalcon
| 2023-08-30T09:39:52.290147
| 2022-09-08T16:42:38
| 2022-09-08T16:42:38
| 15,507,576
| 753
| 71
|
MIT
| 2021-05-08T04:59:21
| 2013-12-29T11:38:47
|
C
|
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
pins.py
|
# Print a nice list of pins, their current settings, and available afs.
# Requires pins_af.py from ports/stm32/build-PYBV10/ directory.
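# Illustrative REPL usage (editor's note; assumes a pyboard with pins_af.py
# available on the device; the functions are defined below):
#   >>> import pins
#   >>> pins.af()    # table of every pin and its alternate functions
#   >>> pins.pins()  # current mode, pull and alternate function of every pin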
import pyb
import pins_af
def af():
max_name_width = 0
max_af_width = 0
for pin_entry in pins_af.PINS_AF:
max_name_width = max(max_name_width, len(pin_entry[0]))
for af_entry in pin_entry[1:]:
max_af_width = max(max_af_width, len(af_entry[1]))
for pin_entry in pins_af.PINS_AF:
pin_name = pin_entry[0]
print("%-*s " % (max_name_width, pin_name), end="")
for af_entry in pin_entry[1:]:
print("%2d: %-*s " % (af_entry[0], max_af_width, af_entry[1]), end="")
print("")
def pins():
mode_str = {
pyb.Pin.IN: "IN",
pyb.Pin.OUT_PP: "OUT_PP",
pyb.Pin.OUT_OD: "OUT_OD",
pyb.Pin.AF_PP: "AF_PP",
pyb.Pin.AF_OD: "AF_OD",
pyb.Pin.ANALOG: "ANALOG",
}
    pull_str = {
        pyb.Pin.PULL_NONE: "",
        pyb.Pin.PULL_UP: "PULL_UP",
        pyb.Pin.PULL_DOWN: "PULL_DOWN",
    }
width = [0, 0, 0, 0]
rows = []
for pin_entry in pins_af.PINS_AF:
row = []
pin_name = pin_entry[0]
pin = pyb.Pin(pin_name)
pin_mode = pin.mode()
row.append(pin_name)
row.append(mode_str[pin_mode])
row.append(pull_str[pin.pull()])
if pin_mode == pyb.Pin.AF_PP or pin_mode == pyb.Pin.AF_OD:
pin_af = pin.af()
for af_entry in pin_entry[1:]:
if pin_af == af_entry[0]:
af_str = "%d: %s" % (pin_af, af_entry[1])
break
else:
af_str = "%d" % pin_af
else:
af_str = ""
row.append(af_str)
for col in range(len(width)):
width[col] = max(width[col], len(row[col]))
rows.append(row)
for row in rows:
for col in range(len(width)):
print("%-*s " % (width[col], row[col]), end="")
print("")
|
6c3155b31dd68205ffff01f49894890cdfff4c40
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/plugins/hg4idea/testData/bin/hgext/infinitepush/store.py
|
ed449b2cc6f3f040331a552fd707f9caea691400
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 6,296
|
py
|
store.py
|
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# based on bundleheads extension by Gregory Szorc <gps@mozilla.com>
from __future__ import absolute_import
import abc
import os
import subprocess
from mercurial.node import hex
from mercurial.pycompat import open
from mercurial import pycompat
from mercurial.utils import (
hashutil,
procutil,
)
class BundleWriteException(Exception):
pass
class BundleReadException(Exception):
pass
class abstractbundlestore(object): # pytype: disable=ignored-metaclass
"""Defines the interface for bundle stores.
A bundle store is an entity that stores raw bundle data. It is a simple
key-value store. However, the keys are chosen by the store. The keys can
be any Python object understood by the corresponding bundle index (see
``abstractbundleindex`` below).
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def write(self, data):
"""Write bundle data to the store.
This function receives the raw data to be written as a str.
Throws BundleWriteException
The key of the written data MUST be returned.
"""
@abc.abstractmethod
def read(self, key):
"""Obtain bundle data for a key.
Returns None if the bundle isn't known.
Throws BundleReadException
The returned object should be a file object supporting read()
and close().
"""
class filebundlestore(object):
"""bundle store in filesystem
meant for storing bundles somewhere on disk and on network filesystems
"""
def __init__(self, ui, repo):
self.ui = ui
self.repo = repo
self.storepath = ui.configpath(b'scratchbranch', b'storepath')
if not self.storepath:
self.storepath = self.repo.vfs.join(
b"scratchbranches", b"filebundlestore"
)
if not os.path.exists(self.storepath):
os.makedirs(self.storepath)
def _dirpath(self, hashvalue):
"""First two bytes of the hash are the name of the upper
level directory, next two bytes are the name of the
next level directory"""
return os.path.join(self.storepath, hashvalue[0:2], hashvalue[2:4])
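        # For example (hypothetical digest, editor's note): 'ab12cd...' maps to
        # <storepath>/ab/12/, and _filepath() appends the full digest below it.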
def _filepath(self, filename):
return os.path.join(self._dirpath(filename), filename)
def write(self, data):
filename = hex(hashutil.sha1(data).digest())
dirpath = self._dirpath(filename)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with open(self._filepath(filename), b'wb') as f:
f.write(data)
return filename
def read(self, key):
try:
with open(self._filepath(key), b'rb') as f:
return f.read()
except IOError:
return None
def format_placeholders_args(args, filename=None, handle=None):
"""Formats `args` with Infinitepush replacements.
Hack to get `str.format()`-ed strings working in a BC way with
bytes.
"""
formatted_args = []
for arg in args:
if filename and arg == b'{filename}':
formatted_args.append(filename)
elif handle and arg == b'{handle}':
formatted_args.append(handle)
else:
formatted_args.append(arg)
return formatted_args
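# For example (hypothetical argument list, editor's note):
#   format_placeholders_args([b'--out', b'{filename}'], filename=b'/tmp/bundle')
#   returns [b'--out', b'/tmp/bundle']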
class externalbundlestore(abstractbundlestore):
def __init__(self, put_binary, put_args, get_binary, get_args):
"""
`put_binary` - path to binary file which uploads bundle to external
storage and prints key to stdout
`put_args` - format string with additional args to `put_binary`
{filename} replacement field can be used.
`get_binary` - path to binary file which accepts filename and key
(in that order), downloads bundle from store and saves it to file
`get_args` - format string with additional args to `get_binary`.
{filename} and {handle} replacement field can be used.
"""
self.put_args = put_args
self.get_args = get_args
self.put_binary = put_binary
self.get_binary = get_binary
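        # Illustrative configuration (hypothetical helper scripts, editor's
        # note): the store invokes them roughly as
        #   put-bundle {filename}           -> prints the storage key on stdout
        #   get-bundle {filename} {handle}  -> writes the bundle into {filename}
        # e.g. externalbundlestore(
        #     put_binary=b'put-bundle', put_args=[b'{filename}'],
        #     get_binary=b'get-bundle', get_args=[b'{filename}', b'{handle}'])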
def _call_binary(self, args):
p = subprocess.Popen(
pycompat.rapply(procutil.tonativestr, args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = p.communicate()
returncode = p.returncode
return returncode, stdout, stderr
def write(self, data):
# Won't work on windows because you can't open file second time without
# closing it
# TODO: rewrite without str.format() and replace NamedTemporaryFile()
# with pycompat.namedtempfile()
with pycompat.namedtempfile() as temp:
temp.write(data)
temp.flush()
temp.seek(0)
formatted_args = format_placeholders_args(
self.put_args, filename=temp.name
)
returncode, stdout, stderr = self._call_binary(
[self.put_binary] + formatted_args
)
if returncode != 0:
raise BundleWriteException(
b'Failed to upload to external store: %s' % stderr
)
stdout_lines = stdout.splitlines()
if len(stdout_lines) == 1:
return stdout_lines[0]
else:
raise BundleWriteException(
b'Bad output from %s: %s' % (self.put_binary, stdout)
)
def read(self, handle):
# Won't work on windows because you can't open file second time without
# closing it
with pycompat.namedtempfile() as temp:
formatted_args = format_placeholders_args(
self.get_args, filename=temp.name, handle=handle
)
returncode, stdout, stderr = self._call_binary(
[self.get_binary] + formatted_args
)
if returncode != 0:
raise BundleReadException(
b'Failed to download from external store: %s' % stderr
)
return temp.read()
|
b18d4584b0e950b7bafa61c0b9fe222f97390d91
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/www/py-lektor/files/patch-setup.py
|
22af075176c56788564d6358657de358dd153da0
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
patch-setup.py
|
--- setup.py.orig 2022-07-31 16:11:11 UTC
+++ setup.py
@@ -0,0 +1,6 @@
+from setuptools import setup
+
+setup(
+ name="Lektor",
+ version="3.3.6",
+)
|
fa9170e98b5c3844314bc7a04a8808b0545c458b
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CalibTracker/Configuration/python/SiPixel_FrontierConditions_cff.py
|
a48bd342eee930e744487eeee21e51f99bbc7778
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
SiPixel_FrontierConditions_cff.py
|
import FWCore.ParameterSet.Config as cms
#
# Pixel Frontier conditions from Production DB
#
from CalibTracker.Configuration.SiPixelCabling.SiPixelCabling_Frontier_cff import *
from CalibTracker.Configuration.SiPixelGain.SiPixelGain_Frontier_cff import *
from CalibTracker.Configuration.SiPixelLorentzAngle.SiPixelLorentzAngle_Frontier_cff import *
|
4cb100a9f3354f2dc8fd0206d342d27bce4e739f
|
bfc42c114f652012b6cfd14e7cccf52cb6b9ac7e
|
/src/spdx_tools/spdx/validation/package_validator.py
|
25cd6147fa5296c64dc1c4e62da4a6d7482307e0
|
[
"Apache-2.0",
"GPL-2.0-only"
] |
permissive
|
spdx/tools-python
|
05a952501af2ac608678cb1737f7c661f6091fa2
|
777bd274dd06cb24342738df7da5ab285d652350
|
refs/heads/main
| 2023-08-31T09:39:52.930063
| 2023-08-24T06:39:48
| 2023-08-24T10:22:33
| 32,761,058
| 147
| 136
|
Apache-2.0
| 2023-09-14T15:50:59
| 2015-03-23T21:54:39
|
Python
|
UTF-8
|
Python
| false
| false
| 7,111
|
py
|
package_validator.py
|
# SPDX-FileCopyrightText: 2022 spdx contributors
#
# SPDX-License-Identifier: Apache-2.0
from beartype.typing import List, Optional
from spdx_tools.spdx.model import Document, File, Package, Relationship, RelationshipType
from spdx_tools.spdx.model.relationship_filters import filter_by_type_and_origin, filter_by_type_and_target
from spdx_tools.spdx.spdx_element_utils import get_element_type_from_spdx_id
from spdx_tools.spdx.validation.checksum_validator import validate_checksums
from spdx_tools.spdx.validation.external_package_ref_validator import validate_external_package_refs
from spdx_tools.spdx.validation.license_expression_validator import (
validate_license_expression,
validate_license_expressions,
)
from spdx_tools.spdx.validation.package_verification_code_validator import validate_verification_code
from spdx_tools.spdx.validation.spdx_id_validators import validate_spdx_id
from spdx_tools.spdx.validation.uri_validators import validate_download_location, validate_url
from spdx_tools.spdx.validation.validation_message import SpdxElementType, ValidationContext, ValidationMessage
def validate_packages(
packages: List[Package], spdx_version: str, document: Optional[Document] = None
) -> List[ValidationMessage]:
validation_messages: List[ValidationMessage] = []
if document:
for package in packages:
validation_messages.extend(validate_package_within_document(package, spdx_version, document))
else:
for package in packages:
validation_messages.extend(validate_package(package, spdx_version))
return validation_messages
def validate_package_within_document(
package: Package, spdx_version: str, document: Document
) -> List[ValidationMessage]:
validation_messages: List[ValidationMessage] = []
context = ValidationContext(
spdx_id=package.spdx_id,
parent_id=document.creation_info.spdx_id,
element_type=SpdxElementType.PACKAGE,
full_element=package,
)
for message in validate_spdx_id(package.spdx_id, document):
validation_messages.append(ValidationMessage(message, context))
if not package.files_analyzed:
package_contains_relationships = filter_by_type_and_origin(
document.relationships, RelationshipType.CONTAINS, package.spdx_id
)
package_contains_file_relationships = [
relationship
for relationship in package_contains_relationships
if get_element_type_from_spdx_id(relationship.related_spdx_element_id, document) == File
]
contained_in_package_relationships = filter_by_type_and_target(
document.relationships, RelationshipType.CONTAINED_BY, package.spdx_id
)
file_contained_in_package_relationships = [
relationship
for relationship in contained_in_package_relationships
if get_element_type_from_spdx_id(relationship.spdx_element_id, document) == File
]
combined_relationships: List[Relationship] = (
package_contains_file_relationships + file_contained_in_package_relationships
)
if combined_relationships:
validation_messages.append(
ValidationMessage(
f"package must contain no elements if files_analyzed is False, but found {combined_relationships}",
context,
)
)
validation_messages.extend(validate_license_expression(package.license_concluded, document, package.spdx_id))
license_info_from_files = package.license_info_from_files
if license_info_from_files:
if not package.files_analyzed:
validation_messages.append(
ValidationMessage(
f"license_info_from_files must be None if files_analyzed is False, but is: "
f"{license_info_from_files}",
context,
)
)
else:
validation_messages.extend(
validate_license_expressions(license_info_from_files, document, package.spdx_id)
)
validation_messages.extend(validate_license_expression(package.license_declared, document, package.spdx_id))
validation_messages.extend(validate_package(package, spdx_version, context))
return validation_messages
def validate_package(
package: Package, spdx_version: str, context: Optional[ValidationContext] = None
) -> List[ValidationMessage]:
validation_messages: List[ValidationMessage] = []
if not context:
context = ValidationContext(
spdx_id=package.spdx_id, element_type=SpdxElementType.PACKAGE, full_element=package
)
download_location = package.download_location
if isinstance(download_location, str):
for message in validate_download_location(download_location):
validation_messages.append(ValidationMessage("package download_location " + message, context))
homepage = package.homepage
if isinstance(homepage, str):
for message in validate_url(homepage):
validation_messages.append(ValidationMessage("homepage " + message, context))
verification_code = package.verification_code
if verification_code:
if not package.files_analyzed:
validation_messages.append(
ValidationMessage(
f"verification_code must be None if files_analyzed is False, but is: {verification_code}", context
)
)
else:
validation_messages.extend(validate_verification_code(verification_code, package.spdx_id))
validation_messages.extend(validate_checksums(package.checksums, package.spdx_id, spdx_version))
validation_messages.extend(
validate_external_package_refs(package.external_references, package.spdx_id, spdx_version)
)
if spdx_version == "SPDX-2.2":
if package.primary_package_purpose is not None:
validation_messages.append(
ValidationMessage("primary_package_purpose is not supported in SPDX-2.2", context)
)
if package.built_date is not None:
validation_messages.append(ValidationMessage("built_date is not supported in SPDX-2.2", context))
if package.release_date is not None:
validation_messages.append(ValidationMessage("release_date is not supported in SPDX-2.2", context))
if package.valid_until_date is not None:
validation_messages.append(ValidationMessage("valid_until_date is not supported in SPDX-2.2", context))
if package.license_concluded is None:
validation_messages.append(ValidationMessage("license_concluded is mandatory in SPDX-2.2", context))
if package.license_declared is None:
validation_messages.append(ValidationMessage("license_declared is mandatory in SPDX-2.2", context))
if package.copyright_text is None:
validation_messages.append(ValidationMessage("copyright_text is mandatory in SPDX-2.2", context))
return validation_messages
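# Illustrative usage (hypothetical `document` parsed elsewhere, editor's note):
#   messages = validate_packages(document.packages, "SPDX-2.3", document)
#   for message in messages:
#       print(message.validation_message)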
|
5e3d295317f645b26d9f85dbad0b2a21079e176b
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/tpu/feature_column_v2_test.py
|
f278295c27683a4b815c38ebba2308cc2feb3f21
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 29,823
|
py
|
feature_column_v2_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tests for python.tpu.feature_column."""
import copy
from absl.testing import parameterized
from keras.feature_column import dense_features as df_lib
from keras.feature_column import sequence_feature_column as sfc_lib
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.tpu import feature_column_v2 as tpu_fc
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
def _initialized_session():
sess = session.Session()
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
class _TestStateManager(fc_lib.StateManager):
def __init__(self, trainable=True):
self._all_variables = {}
self._trainable = trainable
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if feature_column not in self._all_variables:
self._all_variables[feature_column] = {}
var_dict = self._all_variables[feature_column]
if name in var_dict:
return var_dict[name]
else:
var = variable_scope.get_variable(
name=name,
shape=shape,
dtype=dtype,
trainable=self._trainable and trainable,
use_resource=use_resource,
initializer=initializer)
var_dict[name] = var
return var
def get_variable(self, feature_column, name):
return self._all_variables[feature_column][name]
class EmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
def test_defaults(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column, dimension=embedding_dimension)
# Can't test default initializer as it's a random function.
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
def test_all_constructor_args(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer')
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_initializer', embedding_column.initializer())
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
@parameterized.named_parameters(
{
'testcase_name': 'use_safe_embedding_lookup',
'use_safe_embedding_lookup': True,
}, {
'testcase_name': 'dont_use_safe_embedding_lookup',
'use_safe_embedding_lookup': False,
})
@test_util.deprecated_graph_mode_only
def test_feature_layer_cpu(self, use_safe_embedding_lookup):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
expected_lookups_sequence = (
# example 0, ids [2], embedding = [[7, 11], [0, 0]]
((7., 11.), (0., 0.),),
        # example 1, ids [0, 1], embedding = [[1, 2], [3, 5]]
((1., 2.), (3., 5.),),
        # example 2, ids [], embedding = [[0, 0], [0, 0]]
        ((0., 0.), (0., 0.),),
        # example 3, ids [1], embedding = [[3, 5], [0, 0]]
        ((3., 5.), (0., 0.),),
)
# Build columns.
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sequence_categorical_column = (
fc_lib.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size))
embedding_column = tpu_fc.embedding_column_v2(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
use_safe_embedding_lookup=use_safe_embedding_lookup)
sequence_embedding_column = tpu_fc.embedding_column_v2(
sequence_categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
max_sequence_length=2,
use_safe_embedding_lookup=use_safe_embedding_lookup)
# Provide sparse input and get dense result.
features = {'aaa': sparse_input, 'bbb': sparse_input}
dense_features = df_lib.DenseFeatures([embedding_column])
sequence_features = sfc_lib.SequenceFeatures([sequence_embedding_column])
embedding_lookup = dense_features(features)
sequence_embedding_lookup = sequence_features(features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('dense_features/aaa_embedding/embedding_weights:0',
'sequence_features/bbb_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, embedding_lookup)
self.assertAllEqual(expected_lookups_sequence,
sequence_embedding_lookup[0].eval())
# The graph will still have SparseFillEmptyRows due to sequence being
# a Rank3 embedding lookup.
if use_safe_embedding_lookup:
self.assertEqual(2, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
else:
self.assertEqual(1, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
def test_deepcopy(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_column = tpu_fc.embedding_column_v2(
categorical_column, dimension=2)
embedding_column_copy = copy.deepcopy(embedding_column)
self.assertEqual(embedding_column.dimension,
embedding_column_copy.dimension)
self.assertEqual(embedding_column._max_sequence_length,
embedding_column_copy._max_sequence_length)
def test_with_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=False)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
state_manager = _TestStateManager()
with tpu_function.tpu_shard_context(1):
with variable_scope.variable_scope('tower1/scope1'):
embedding_column.create_state(state_manager)
with variable_scope.variable_scope('tower2/scope2'):
# With default scope validation, the same column cannot be used in a new
# variable scope.
with self.assertRaisesRegex(ValueError,
'the variable scope name is different'):
embedding_column.create_state(state_manager)
def test_bypass_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=True)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
state_manager = _TestStateManager()
with tpu_function.tpu_shard_context(1):
with variable_scope.variable_scope('tower1/scope1'):
embedding_column.create_state(state_manager)
with variable_scope.variable_scope('tower2/scope2'):
embedding_column.create_state(state_manager)
def test_deepcopy_with_bypass_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
use_safe_embedding_lookup=False,
bypass_scope_validation=True)
embedding_column_copy = copy.deepcopy(embedding_column)
self.assertEqual(embedding_dimension, embedding_column_copy.dimension)
self.assertEqual(embedding_column._max_sequence_length,
embedding_column_copy._max_sequence_length)
self.assertTrue(embedding_column_copy._bypass_scope_validation)
self.assertFalse(embedding_column_copy.use_safe_embedding_lookup)
class SharedEmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
@test_util.deprecated_graph_mode_only
def test_defaults(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNotNone(embedding_column_a.get_initializer())
self.assertIsNotNone(embedding_column_b.get_initializer())
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.get_embedding_var_name())
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.get_embedding_var_name())
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
@test_util.deprecated_graph_mode_only
def test_all_constructor_args(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='var_scope_name')
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('my_initializer', embedding_column_a.get_initializer()())
self.assertEqual('my_initializer', embedding_column_b.get_initializer()())
self.assertEqual('var_scope_name',
embedding_column_a.get_embedding_var_name())
self.assertEqual('var_scope_name',
embedding_column_b.get_embedding_var_name())
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
@parameterized.named_parameters(
{
'testcase_name': 'use_safe_embedding_lookup',
'use_safe_embedding_lookup': True
}, {
'testcase_name': 'dont_use_safe_embedding_lookup',
'use_safe_embedding_lookup': False
})
@test_util.deprecated_graph_mode_only
def test_feature_layer_cpu(self, use_safe_embedding_lookup):
# Inputs.
vocabulary_size = 3
input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(3, 2))
input_features = {'aaa': input_a, 'bbb': input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
((7., 11.), (0., 0.),), # ids [2], embedding = [[7, 11], [0, 0]]
# example 1:
((1., 2.), (3., 5.),), # ids [0, 1], embedding = [[1, 2], [3, 5]]
# example 2:
((0., 0.), (0., 0.),), # ids [], embedding = [[0, 0], [0, 0]]
)
# Build columns.
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer,
max_sequence_lengths=[0, 2],
use_safe_embedding_lookup=use_safe_embedding_lookup)
# Provide sparse input and get dense result.
dense_features = df_lib.DenseFeatures([embedding_column_a])
sequence_features = sfc_lib.SequenceFeatures([embedding_column_b])
embedding_lookup_a = dense_features(input_features)
embedding_lookup_b = sequence_features(input_features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var)
self.assertAllEqual(expected_lookups_a, embedding_lookup_a)
self.assertAllEqual(expected_lookups_b,
embedding_lookup_b[0].eval())
# The graph will still have SparseFillEmptyRows due to sequence being
# a Rank3 embedding lookup.
if use_safe_embedding_lookup:
self.assertEqual(2, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
else:
self.assertEqual(1, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
def test_deepcopy(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
columns = tpu_fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
columns_copy = copy.deepcopy(columns)
self.assertEqual(
[column._shared_embedding_collection_name for column in columns],
[column._shared_embedding_collection_name for column in columns_copy])
class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'invalid_shared',
'shared': True,
}, {
'testcase_name': 'invalid_not_shared',
'shared': False,
})
@test_util.deprecated_graph_mode_only
def test_invalid_cases(self, shared):
# Inputs.
input_sparse_tensor = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 4)),
values=(2, 0, 1, 3),
dense_shape=(2, 5))
input_features = {'inp': input_sparse_tensor}
# Build columns.
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=3)
# Training on TPU with cpu embedding lookups is not supported.
if shared:
embedding_column = tpu_fc.shared_embedding_columns_v2(
[categorical_column_input],
dimension=2,
embedding_lookup_device='cpu',
tensor_core_shape=[None, 3])
else:
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=2,
embedding_lookup_device='cpu',
tensor_core_shape=[None, 3])
dense_features = df_lib.DenseFeatures(embedding_column)
with self.assertRaisesRegex(
ValueError,
r'.*embedding_lookup_device=\"cpu\" during training is not'):
dense_features(input_features)
# Inference on with TPU Embedding Hardware is not supported.
if shared:
embedding_column = tpu_fc.shared_embedding_columns_v2(
[categorical_column_input],
dimension=2,
embedding_lookup_device='tpu_embedding_core',
tensor_core_shape=[None, 3])
else:
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=2,
embedding_lookup_device='tpu_embedding_core',
tensor_core_shape=[None, 3])
context = tpu._TPUInferenceContext('tpu_inference')
context.Enter()
dense_features = df_lib.DenseFeatures(embedding_column)
with self.assertRaisesRegex(
ValueError,
r'Using embedding_lookup_device=tpu_embedding_core during inference is '
):
dense_features(input_features)
context.Exit()
@parameterized.named_parameters(
{
'testcase_name': 'combiner_mean_shared',
'shared': True,
'combiner': 'mean'
}, {
'testcase_name': 'combiner_sum_shared',
'shared': True,
'combiner': 'sum'
}, {
'testcase_name': 'combiner_sqrtn_shared',
'shared': True,
'combiner': 'sqrtn'
}, {
'testcase_name': 'combiner_mean_not_shared',
'shared': False,
'combiner': 'mean'
}, {
'testcase_name': 'combiner_sum_not_shared',
'shared': False,
'combiner': 'sum'
}, {
'testcase_name': 'combiner_sqrtn_not_shared',
'shared': False,
'combiner': 'sqrtn'
})
@test_util.deprecated_graph_mode_only
def test_dense_embedding_lookup(self, shared, combiner):
# Inputs.
vocabulary_size = 3
input_sparse_tensor = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1, 3]
indices=((0, 0), (1, 0), (1, 1), (1, 4)),
values=(2, 0, 1, 3),
dense_shape=(2, 5))
input_features = {'inp': input_sparse_tensor}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.), # id 2
(13., 17.) # id 3
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Build columns.
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=vocabulary_size)
    # Set tensor_core_shape to [None, 3] to ensure some padding and a
    # dynamic batch size.
if shared:
embedding_column = tpu_fc.shared_embedding_columns_v2(
[categorical_column_input],
dimension=embedding_dimension,
initializer=_initializer,
combiner=combiner,
embedding_lookup_device='tpu_tensor_core',
tensor_core_shape=[None, 3])
else:
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=embedding_dimension,
initializer=_initializer,
combiner=combiner,
embedding_lookup_device='tpu_tensor_core',
tensor_core_shape=[None, 3])
# Run in TPUContexts so that we hit the intended densification case.
context = tpu._TPUInferenceContext('tpu_inference')
context.Enter()
with tpu_function.tpu_shard_context(1):
dense_features = df_lib.DenseFeatures(embedding_column)
# Sqrtn combiner not supported for now.
if combiner == 'sqrtn':
with self.assertRaisesRegex(
ValueError, 'Dense TPU Embedding does not support combiner'):
embedding_lookup = dense_features(input_features)
return
if combiner == 'mean':
expected_lookups = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) =
# [2, 3.5]
)
elif combiner == 'sum':
expected_lookups = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(4., 7), # ids [0, 1], embedding = sum([1, 2] + [3, 5]) = [4, 7]
)
embedding_lookup = dense_features(input_features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
if shared:
self.assertCountEqual(('inp_shared_embedding:0',),
tuple([v.name for v in global_vars]))
else:
self.assertCountEqual(
('dense_features/inp_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var)
eval_res = embedding_lookup.eval()
self.assertAllEqual(expected_lookups, eval_res)
context.Exit()
@test_util.deprecated_graph_mode_only
def test_empty_row(self):
# Inputs.
vocabulary_size = 3
input_sparse_tensor = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [0, 1, 3]
indices=((1, 0), (1, 1), (1, 4)),
values=(0, 1, 3),
dense_shape=(2, 5))
input_features = {'inp': input_sparse_tensor}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.), # id 2
(13., 17.) # id 3
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Build columns.
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=vocabulary_size)
    # Set tensor_core_shape to [None, 3] to ensure some padding and a
    # dynamic batch size.
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=embedding_dimension,
initializer=_initializer,
combiner='mean',
embedding_lookup_device='tpu_tensor_core',
tensor_core_shape=[None, 3])
# Run in TPUContexts so that we hit the intended densification case.
context = tpu._TPUInferenceContext('tpu_inference')
context.Enter()
with tpu_function.tpu_shard_context(1):
dense_features = df_lib.DenseFeatures(embedding_column)
expected_lookups = (
# example 0:
(0., 0.), # ids [], embedding = [0, 0]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
embedding_lookup = dense_features(input_features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('dense_features/inp_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var)
eval_res = embedding_lookup.eval()
self.assertAllEqual(expected_lookups, eval_res)
context.Exit()
@test_util.deprecated_graph_mode_only
def test_error_dense_shape_invalid(self):
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=5)
with self.assertRaisesRegex(ValueError, 'tensor_core_shape must be size 2'):
tpu_fc.shared_embedding_columns_v2([categorical_column_input],
dimension=20,
tensor_core_shape=[None, 20, 15])
if __name__ == '__main__':
test.main()
|
289942882108cb52dd9e749721538775502bef57
|
752818f443b62f5c7041cc855389762026987174
|
/tests/test_lint.py
|
0a2153dc6ac3eb7afe20a29e2927a96118432ac9
|
[
"MIT"
] |
permissive
|
Pylons/webtest
|
b67c085b5fdcce44acf175b9f8d35575df5bb81e
|
5bc6841351a71569889e11f443a7948cb3ca64f0
|
refs/heads/main
| 2023-09-01T00:57:04.362679
| 2023-06-08T06:54:30
| 2023-06-08T06:54:30
| 6,662,296
| 260
| 97
|
NOASSERTION
| 2023-06-24T14:17:52
| 2012-11-12T23:19:10
|
Python
|
UTF-8
|
Python
| false
| false
| 10,986
|
py
|
test_lint.py
|
import sys
from tests.compat import unittest
from webob import Request, Response
import warnings
from unittest import mock
from io import StringIO
from webtest import TestApp
from webtest.compat import to_bytes
from webtest.lint import check_headers
from webtest.lint import check_content_type
from webtest.lint import check_environ
from webtest.lint import IteratorWrapper
from webtest.lint import WriteWrapper
from webtest.lint import ErrorWrapper
from webtest.lint import InputWrapper
from webtest.lint import to_string
from webtest.lint import middleware
from webtest.lint import _assert_latin1_str
from io import BytesIO
def application(environ, start_response):
req = Request(environ)
resp = Response()
env_input = environ['wsgi.input']
len_body = len(req.body)
env_input.input.seek(0)
if req.path_info == '/read':
resp.body = env_input.read(len_body)
elif req.path_info == '/read_line':
resp.body = env_input.readline(len_body)
elif req.path_info == '/read_lines':
resp.body = b'-'.join(env_input.readlines(len_body))
elif req.path_info == '/close':
resp.body = env_input.close()
return resp(environ, start_response)
class TestLatin1Assertion(unittest.TestCase):
def test_valid_type(self):
value = "useful-inførmation-5"
assert value == _assert_latin1_str(value, "fail")
def test_invalid_type(self):
value = b"useful-information-5"
self.assertRaises(AssertionError, _assert_latin1_str, value, "fail")
class TestToString(unittest.TestCase):
def test_to_string(self):
self.assertEqual(to_string('foo'), 'foo')
self.assertEqual(to_string(b'foo'), 'foo')
class TestMiddleware(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize > 0, "skip assert tests if optimize is enabled")
def test_lint_too_few_args(self):
linter = middleware(application)
with self.assertRaisesRegex(AssertionError, "Two arguments required"):
linter()
with self.assertRaisesRegex(AssertionError, "Two arguments required"):
linter({})
@unittest.skipIf(sys.flags.optimize > 0, "skip assert tests if optimize is enabled")
def test_lint_no_keyword_args(self):
linter = middleware(application)
with self.assertRaisesRegex(AssertionError, "No keyword arguments "
"allowed"):
linter({}, 'foo', baz='baz')
# TODO: test start_response_wrapper
@mock.patch.multiple('webtest.lint',
check_environ=lambda x: True, # don't block too early
InputWrapper=lambda x: True)
def test_lint_iterator_returned(self):
linter = middleware(lambda x, y: None) # None is not an iterator
msg = "The application must return an iterator, if only an empty list"
with self.assertRaisesRegex(AssertionError, msg):
linter({'wsgi.input': 'foo', 'wsgi.errors': 'foo'}, 'foo')
class TestInputWrapper(unittest.TestCase):
def test_read(self):
app = TestApp(application)
resp = app.post('/read', 'hello')
self.assertEqual(resp.body, b'hello')
def test_readline(self):
app = TestApp(application)
resp = app.post('/read_line', 'hello\n')
self.assertEqual(resp.body, b'hello\n')
def test_readlines(self):
app = TestApp(application)
resp = app.post('/read_lines', 'hello\nt\n')
self.assertEqual(resp.body, b'hello\n-t\n')
def test_close(self):
input_wrapper = InputWrapper(None)
self.assertRaises(AssertionError, input_wrapper.close)
def test_iter(self):
data = to_bytes("A line\nAnother line\nA final line\n")
input_wrapper = InputWrapper(BytesIO(data))
self.assertEqual(to_bytes("").join(input_wrapper), data, '')
def test_seek(self):
data = to_bytes("A line\nAnother line\nA final line\n")
input_wrapper = InputWrapper(BytesIO(data))
input_wrapper.seek(0)
self.assertEqual(to_bytes("").join(input_wrapper), data, '')
class TestMiddleware2(unittest.TestCase):
def test_exc_info(self):
def application_exc_info(environ, start_response):
body = to_bytes('body stuff')
headers = [
('Content-Type', 'text/plain; charset=utf-8'),
('Content-Length', str(len(body)))]
# PEP 3333 requires native strings:
headers = [(str(k), str(v)) for k, v in headers]
start_response(to_bytes('200 OK'), headers, ('stuff',))
return [body]
app = TestApp(application_exc_info)
app.get('/')
# don't know what to assert here... a bit cheating, just covers code
class TestCheckContentType(unittest.TestCase):
def test_no_content(self):
status = "204 No Content"
headers = [
('Content-Type', 'text/plain; charset=utf-8'),
('Content-Length', '4')
]
self.assertRaises(AssertionError, check_content_type, status, headers)
def test_no_content_type(self):
status = "200 OK"
headers = [
('Content-Length', '4')
]
self.assertRaises(AssertionError, check_content_type, status, headers)
class TestCheckHeaders(unittest.TestCase):
def test_header_bytes_name(self):
headers = [(b'X-Price', '100')]
self.assertRaises(AssertionError, check_headers, headers)
def test_header_bytes_value(self):
headers = [('X-Price', b'100')]
self.assertRaises(AssertionError, check_headers, headers)
def test_header_non_latin1_value(self):
headers = [('X-Price', '100€')]
self.assertRaises(AssertionError, check_headers, headers)
def test_header_non_latin1_name(self):
headers = [('X-€', 'foo')]
self.assertRaises(AssertionError, check_headers, headers)
class TestCheckEnviron(unittest.TestCase):
def test_no_query_string(self):
environ = {
'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '80',
'wsgi.version': (1, 0, 1),
'wsgi.input': StringIO('test'),
'wsgi.errors': StringIO(),
'wsgi.multithread': None,
'wsgi.multiprocess': None,
'wsgi.run_once': None,
'wsgi.url_scheme': 'http',
'PATH_INFO': '/',
}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
check_environ(environ)
self.assertEqual(len(w), 1, "We should have only one warning")
self.assertTrue(
"QUERY_STRING" in str(w[-1].message),
"The warning message should say something about QUERY_STRING")
def test_no_valid_request(self):
environ = {
'REQUEST_METHOD': 'PROPFIND',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '80',
'wsgi.version': (1, 0, 1),
'wsgi.input': StringIO('test'),
'wsgi.errors': StringIO(),
'wsgi.multithread': None,
'wsgi.multiprocess': None,
'wsgi.run_once': None,
'wsgi.url_scheme': 'http',
'PATH_INFO': '/',
'QUERY_STRING': '',
}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
check_environ(environ)
self.assertEqual(len(w), 1, "We should have only one warning")
self.assertTrue(
"REQUEST_METHOD" in str(w[-1].message),
"The warning message should say something "
"about REQUEST_METHOD")
def test_handles_native_strings_in_variables(self):
path = '/umläut'
environ = {
'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '80',
'wsgi.version': (1, 0, 1),
'wsgi.input': StringIO('test'),
'wsgi.errors': StringIO(),
'wsgi.multithread': None,
'wsgi.multiprocess': None,
'wsgi.run_once': None,
'wsgi.url_scheme': 'http',
'PATH_INFO': path,
'QUERY_STRING': '',
}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
check_environ(environ)
self.assertEqual(0, len(w), "We should have no warning")
class TestIteratorWrapper(unittest.TestCase):
def test_close(self):
class MockIterator:
def __init__(self):
self.closed = False
def __iter__(self):
return self
def __next__(self):
return None
next = __next__
def close(self):
self.closed = True
mock = MockIterator()
wrapper = IteratorWrapper(mock, None)
wrapper.close()
self.assertTrue(mock.closed, "Original iterator has not been closed")
class TestWriteWrapper(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize > 0, "skip assert tests if optimize is enabled")
def test_wrong_type(self):
write_wrapper = WriteWrapper(None)
self.assertRaises(AssertionError, write_wrapper, 'not a binary')
def test_normal(self):
class MockWriter:
def __init__(self):
self.written = []
def __call__(self, s):
self.written.append(s)
data = to_bytes('foo')
mock = MockWriter()
write_wrapper = WriteWrapper(mock)
write_wrapper(data)
self.assertEqual(
mock.written, [data],
"WriterWrapper should call original writer when data is binary "
"type")
class TestErrorWrapper(unittest.TestCase):
def test_dont_close(self):
error_wrapper = ErrorWrapper(None)
self.assertRaises(AssertionError, error_wrapper.close)
class FakeError:
def __init__(self):
self.written = []
self.flushed = False
def write(self, s):
self.written.append(s)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
self.flushed = True
def test_writelines(self):
fake_error = self.FakeError()
error_wrapper = ErrorWrapper(fake_error)
data = [to_bytes('a line'), to_bytes('another line')]
error_wrapper.writelines(data)
self.assertEqual(fake_error.written, data,
"ErrorWrapper should call original writer")
def test_flush(self):
fake_error = self.FakeError()
error_wrapper = ErrorWrapper(fake_error)
error_wrapper.flush()
self.assertTrue(
fake_error.flushed,
"ErrorWrapper should have called original wsgi_errors's flush")
|
4df44807bd8082fb49faaaff61301e94bf0d6a48
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/mindspore/python/mindspore/ops/_grad_experimental/grad_math_ops.py
|
52dda9680d1999d0189047dc79c1c684457cb1f2
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 44,632
|
py
|
grad_math_ops.py
|
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the grad rules of math related operations."""
import numpy as np
import mindspore.numpy as mnp
from mindspore.common import dtype as mstype
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore.ops.operations.math_ops import Real, Imag, Complex, Angle
from mindspore.ops.operations.math_ops import Polar
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops.operations.math_ops import Lgamma
from mindspore.ops.operations.math_ops import Digamma
from mindspore.ops.operations.math_ops import Polygamma
from mindspore.ops.operations.math_ops import CumulativeLogsumexp
from mindspore.ops.operations.math_ops import MatrixSolve
from mindspore.ops.operations.math_ops import MatrixSolveLs
from mindspore.ops.operations.math_ops import MatrixTriangularSolve
from mindspore.ops.operations.math_ops import NanToNum
from mindspore.ops.operations.math_ops import FFTWithSize
from mindspore.ops.operations.math_ops import Cholesky
from mindspore.ops.operations.math_ops import Fmin
from mindspore.ops.operations.math_ops import CholeskySolve
from mindspore.ops.operations.math_ops import InplaceIndexAdd
from mindspore.ops.operations.math_ops import TridiagonalSolve
from mindspore.ops.operations.math_ops import Diagonal
from mindspore.ops.operations.math_ops import EuclideanNorm
from mindspore.ops.operations.array_ops import Transpose, MatrixSetDiagV3
from mindspore.ops.operations.math_ops import Fmax
from mindspore.ops.operations._inner_ops import DynamicBroadcastGradientArgs
from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like
from mindspore.ops.primitive import _primexpr
from mindspore.ops._grad_experimental.grad_base import bprop_getters
from mindspore.ops._grad_experimental.grad_base import sum_grad_reduce_axis
from mindspore.ops.operations.array_ops import MatrixBandPart
from mindspore.ops.operations.array_ops import ConjugateTranspose
from mindspore.ops.functional import broadcast_gradient_args
transpose = P.Transpose()
_conj = P.Conj()
shape_op = P.Shape()
reduce_sum = P.ReduceSum()
reshape = P.Reshape()
def _adjoint(a):
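    """Conjugate transpose of the last two dimensions (Hermitian adjoint)."""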
return cholesky_transpose(_conj(a))
def cholesky_transpose(a):
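    """Swap the last two dimensions, leaving any batch dimensions in place."""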
n = len(a.shape)
n_range = list(range(0, n))
n_range[-1] = n - 2
n_range[-2] = n - 1
return transpose(a, tuple(n_range))
@_primexpr
def renew_dim(shape, dim):
""" Re-new dims"""
new_dim = dim if dim >= 0 else len(shape) + dim
tmp = [i for i in range(len(shape))]
_ = tmp.pop(new_dim)
return tuple(tmp)
@bprop_getters.register(EuclideanNorm)
def get_bprop_euclidean_norm(self):
"""Generate bprop for EuclideanNorm"""
expand_dims = P.ExpandDims()
keep_dims = self.keep_dims
def bprop(x, axes, out, dout):
scale_v = dout / out
if not keep_dims and x.shape != ():
scale_v = expand_dims(scale_v, axes)
return (x * scale_v, zeros_like(axes))
return bprop
@bprop_getters.register(CumulativeLogsumexp)
def get_bprop_cumulative_logsumexp(self):
"""Generate bprop for CumulativeLogsumexp"""
exp_op = P.Exp()
greater_op = P.Greater()
log_op = P.Log()
cumulative_op = CumulativeLogsumexp(self.exclusive, not self.reverse)
less_op = P.Less()
neg_op = P.Neg()
cast = P.Cast()
def bprop(x, axis, out, dout):
dtype_min = 0
if x.dtype == mstype.float16:
dtype_min = cast(np.finfo(np.float16).min, x.dtype)
else:
dtype_min = cast(np.finfo(np.float32).min, x.dtype)
log_grad_positive = mnp.where(greater_op(dout, 0), log_op(dout), dtype_min)
log_grad_negative = mnp.where(less_op(dout, 0), log_op(neg_op(dout)), dtype_min)
output_pos = exp_op(cumulative_op(log_grad_positive - out, axis) + x)
output_neg = exp_op(cumulative_op(log_grad_negative - out, axis) + x)
return (output_pos - output_neg, zeros_like(x))
return bprop
@bprop_getters.register(MatrixTriangularSolve)
def get_bprop_matrix_triangular_solve(self):
"""Grad definition for 'MatrixTriangularSolve' operation"""
adjoint_a = self.adjoint
lower_a = self.lower
matrix_triangular_solve_op = P.MatrixTriangularSolve(lower=lower_a, adjoint=not adjoint_a)
mat_mul_2d_op = P.MatMul()
mat_mul_op = P.BatchMatMul()
real_op = P.Real()
imag_op = P.Imag()
neg_op = P.Neg()
complex_op = P.Complex()
matrix_band_part_op = MatrixBandPart()
def bprop(matrix, rhs, out, dout):
grad_rhs = matrix_triangular_solve_op(matrix, dout)
if matrix.dtype == mstype.complex64 or matrix.dtype == mstype.complex128:
grad_rhs_temp = _adjoint(grad_rhs)
out_temp = _adjoint(out)
else:
grad_rhs_temp = cholesky_transpose(grad_rhs)
out_temp = cholesky_transpose(out)
if adjoint_a:
if len(matrix.shape) == 2:
grad_matrix = mat_mul_2d_op(out, grad_rhs_temp)
grad_matrix = neg_op(grad_matrix)
else:
grad_matrix = mat_mul_op(out, grad_rhs_temp)
grad_matrix = neg_op(grad_matrix)
else:
if len(matrix.shape) == 2:
grad_matrix = mat_mul_2d_op(grad_rhs, out_temp)
grad_matrix = neg_op(grad_matrix)
else:
grad_matrix = mat_mul_op(grad_rhs, out_temp)
grad_matrix = neg_op(grad_matrix)
if lower_a:
if grad_matrix.dtype == mstype.complex64 or grad_matrix.dtype == mstype.complex128:
grad_matrix_real = matrix_band_part_op(real_op(grad_matrix), -1, 0)
grad_matrix_imag = matrix_band_part_op(imag_op(grad_matrix), -1, 0)
grad_matrix = complex_op(grad_matrix_real, grad_matrix_imag)
else:
grad_matrix = matrix_band_part_op(grad_matrix, -1, 0)
else:
if grad_matrix.dtype == mstype.complex64 or grad_matrix.dtype == mstype.complex128:
grad_matrix_real = matrix_band_part_op(real_op(grad_matrix), 0, -1)
grad_matrix_imag = matrix_band_part_op(imag_op(grad_matrix), 0, -1)
grad_matrix = complex_op(grad_matrix_real, grad_matrix_imag)
else:
grad_matrix = matrix_band_part_op(grad_matrix, 0, -1)
return (grad_matrix, grad_rhs)
return bprop
@bprop_getters.register(MatrixSolve)
def get_bprop_matrix_solve(self):
"""Generate bprop for MatrixSolve"""
adjoint = self.adjoint
adjoint_a = not adjoint
solve_op = MatrixSolve(adjoint_a)
batchmatmul = P.BatchMatMul(transpose_b=True)
matmul = P.MatMul(transpose_b=True)
neg = P.Neg()
cast = P.Cast()
rank = P.Rank()
def bprop(input_a, input_b, out, dout):
out_type = F.dtype(out)
if out_type == mstype.float64:
out = cast(out, mstype.float32)
grad_b = solve_op(input_a, dout)
grad_b_type = F.dtype(grad_b)
if grad_b_type == mstype.float64:
grad_b = cast(grad_b, mstype.float32)
matrix_rank = rank(input_a)
if adjoint:
if matrix_rank > 2:
grad_a = batchmatmul(out, grad_b)
grad_a = neg(grad_a)
else:
grad_a = matmul(out, grad_b)
grad_a = neg(grad_a)
else:
if matrix_rank > 2:
grad_a = batchmatmul(grad_b, out)
grad_a = neg(grad_a)
else:
grad_a = matmul(grad_b, out)
grad_a = neg(grad_a)
return grad_a, grad_b
return bprop
@_primexpr
def _generate_perm_matrix_solve_ls(x_dim):
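    """Permutation that swaps the last two axes (batched matrix transpose)."""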
perm = tuple(range(x_dim - 2))
perm = perm + (x_dim-1, x_dim-2)
return perm
@bprop_getters.register(MatrixSolveLs)
def get_bprop_matrix_solve_ls(self):
"""Grad definition for 'MatrixSolveLs' operation"""
fast = self.fast
cast = P.Cast()
neg = P.Neg()
rank = P.Rank()
cholesky = Cholesky()
eye = P.Eye()
add = P.Add()
mul = P.Mul()
matmul = P.MatMul()
batch_matmul = P.BatchMatMul()
cholesky_solve = CholeskySolve()
_transpose = Transpose()
conjugate_transpose = ConjugateTranspose()
shape = P.Shape()
_complex = P.Complex()
scalar2tensor = P.ScalarToTensor()
def regularized_gramian_cholesky(matrix, l2, first_kind):
matrix_dim = rank(matrix)
perm = _generate_perm_matrix_solve_ls(matrix_dim)
if matrix.dtype in (mstype.complex64, mstype.complex128):
matrix_temp = conjugate_transpose(matrix, perm)
else:
matrix_temp = _transpose(matrix, perm)
if first_kind:
if matrix_dim > 2:
gramian = batch_matmul(matrix_temp, matrix)
else:
gramian = matmul(matrix_temp, matrix)
else:
if matrix_dim > 2:
gramian = batch_matmul(matrix, matrix_temp)
else:
gramian = matmul(matrix, matrix_temp)
if isinstance(l2, Tensor) or l2 != 0:
matrix_shape = shape(matrix)
if first_kind:
small_dim = matrix_shape[-1]
else:
small_dim = matrix_shape[-2]
identity = eye(small_dim, small_dim, matrix.dtype)
gramian = add(gramian, mul(l2, identity))
        # Cholesky does not support complex dtype for now
return cholesky(gramian)
def bprop(matrix, rhs, l2, out, dout):
        # supported dtype: float32
        # supported dimensions: 2D, 3D
def over_determined(matrix, rhs, out, l2, dout):
if matrix.dtype == mstype.complex64:
l2_regularizer = _complex(cast(l2, mstype.float32), Tensor(0, mstype.float32))
elif matrix.dtype == mstype.complex128:
l2_regularizer = _complex(cast(l2, mstype.float64), Tensor(0, mstype.float64))
else:
l2_regularizer = cast(l2, matrix.dtype)
chol = cast(regularized_gramian_cholesky(matrix, l2_regularizer, first_kind=True), matrix.dtype)
            # CholeskySolve does not support complex dtype and only supports 2D or 3D matrices for now
z = cholesky_solve(dout, chol)
matrix_dim = rank(matrix)
perm = _generate_perm_matrix_solve_ls(matrix_dim)
if matrix.dtype in (mstype.complex64, mstype.complex128):
z_temp = conjugate_transpose(z, perm)
else:
z_temp = _transpose(z, perm)
if matrix_dim > 2:
xzt = batch_matmul(out, z_temp)
else:
xzt = matmul(out, z_temp)
zx_sym = add(xzt, _transpose(xzt, perm))
if matrix_dim > 2:
grad_a = add(neg(batch_matmul(matrix, zx_sym)), batch_matmul(rhs, z_temp))
grad_b = batch_matmul(matrix, z)
else:
grad_a = add(neg(matmul(matrix, zx_sym)), matmul(rhs, z_temp))
grad_b = matmul(matrix, z)
return (grad_a, grad_b, scalar2tensor(0, l2.dtype))
def under_determined(matrix, rhs, l2, dout):
if matrix.dtype == mstype.complex64:
l2_regularizer = _complex(cast(l2, mstype.float32), Tensor(0, mstype.float32))
elif matrix.dtype == mstype.complex128:
l2_regularizer = _complex(cast(l2, mstype.float64), Tensor(0, mstype.float64))
else:
l2_regularizer = cast(l2, matrix.dtype)
chol = cast(regularized_gramian_cholesky(matrix, l2_regularizer, first_kind=False), matrix.dtype)
matrix_dim = rank(matrix)
perm = _generate_perm_matrix_solve_ls(matrix_dim)
if matrix_dim > 2:
gramian = batch_matmul(matrix, dout)
else:
gramian = matmul(matrix, dout)
            # CholeskySolve does not support complex dtype and only supports 2D or 3D matrices for now
grad_b = cholesky_solve(gramian, chol)
tmp = cholesky_solve(rhs, chol)
if matrix.dtype in (mstype.complex64, mstype.complex128):
tmp_temp = conjugate_transpose(tmp, perm)
matrix_temp = conjugate_transpose(matrix, perm)
else:
tmp_temp = _transpose(tmp, perm)
matrix_temp = _transpose(matrix, perm)
if matrix_dim > 2:
a1 = batch_matmul(tmp_temp, matrix)
a1 = neg(batch_matmul(grad_b, a1))
a2 = dout - batch_matmul(matrix_temp, grad_b)
if matrix.dtype in (mstype.complex64, mstype.complex128):
a2_temp = conjugate_transpose(a2, perm)
else:
a2_temp = _transpose(a2, perm)
a2 = batch_matmul(tmp, a2_temp)
else:
a1 = matmul(tmp_temp, matrix)
a1 = neg(matmul(grad_b, a1))
a2 = dout - matmul(matrix_temp, grad_b)
if matrix.dtype in (mstype.complex64, mstype.complex128):
a2_temp = conjugate_transpose(a2, perm)
else:
a2_temp = _transpose(a2, perm)
a2 = matmul(tmp, a2_temp)
grad_a = add(a1, a2)
return (grad_a, grad_b, scalar2tensor(0, l2.dtype))
if fast is False:
raise ValueError("For MatrixSolveLs, gradient not defined for fast=False")
matrix_shape = shape(matrix)[-2:]
if matrix_shape[-2] >= matrix_shape[-1]:
return over_determined(matrix, rhs, out, l2, dout)
return under_determined(matrix, rhs, l2, dout)
return bprop
@bprop_getters.register(NanToNum)
def get_bprop_nan_to_num(self):
"""Grad definition for `NanToNum` operation."""
isfinite = P.IsFinite()
def bprop(x, out, dout):
dx = dout * isfinite(x)
return (dx,)
return bprop
@bprop_getters.register(Angle)
def get_bprop_angle(self):
"""Grad definition for `Angle` operation."""
real_op = Real()
imag_op = Imag()
reciprocal_op = P.Reciprocal()
complex_op = Complex()
neg_op = P.Neg()
def bprop(x, out, dout):
re = real_op(x)
im = imag_op(x)
re = complex_op(im, re)
z = reciprocal_op(re)
zero = zeros_like(dout)
complex_dout = complex_op(dout, zero)
return (neg_op(complex_dout * z),)
return bprop
@bprop_getters.register(Polar)
def get_bprop_polar(self):
"""Grad definition for `Polar` operation."""
complex_op = Complex()
conj = P.Conj()
real = P.Real()
sig = P.Sign()
ones = P.Ones()
zeros = P.Zeros()
def bprop(input1, angle, out, dout):
grad_conj = conj(dout)
zero = zeros(dout.shape, input1.dtype)
one = ones(dout.shape, input1.dtype)
i = complex_op(zero, one)
grad_abs = real(grad_conj * sig(out))
result_mul_1_j = out * i
grad_angle = real(grad_conj * result_mul_1_j)
return (grad_abs, grad_angle)
return bprop
@bprop_getters.register(Fmin)
def get_bprop_fmin(self):
"""Grad definition for 'Fmin' operation"""
shape_ = P.Shape()
masked_fill_op = P.MaskedFill()
logical_or_op = P.LogicalOr()
logical_not_op = P.LogicalNot()
logical_and_op = P.LogicalAnd()
mul_op = P.Mul()
is_nan_op = P.IsNan()
reshape_ = P.Reshape()
def bprop(x1, x2, out, dout):
x1_dtype = F.dtype(x1)
x2_dtype = F.dtype(x2)
x1 = F.cast(x1, mstype.float32)
x2 = F.cast(x2, mstype.float32)
dout = F.cast(dout, mstype.float32)
b1 = logical_or_op((x1 <= x2), is_nan_op(x2))
b2 = logical_or_op((x2 < x1), logical_and_op(is_nan_op(x1), logical_not_op(is_nan_op(x2))))
rx1 = masked_fill_op(x1, b1, 1.)
rx1 = masked_fill_op(rx1, logical_not_op(b1), 0.)
rx2 = masked_fill_op(x2, b2, 1.)
rx2 = masked_fill_op(rx2, logical_not_op(b2), 0.)
rrx1 = mul_op(rx1, dout)
rrx2 = mul_op(rx2, dout)
shape_of_x1 = shape_(x1)
shape_of_x2 = shape_(x2)
x1_dim = len(shape_of_x1)
x2_dim = len(shape_of_x2)
if x1_dim == 0 and x2_dim != 0:
sum_r1 = rrx1.sum()
sum_r2 = rrx2
elif x1_dim == 0 and x2_dim == 0:
sum_r1 = rrx1.sum()
sum_r2 = rrx2.sum()
elif x1_dim != 0 and x2_dim == 0:
sum_r2 = rrx2.sum()
sum_r1 = rrx1
else:
rx, ry = DynamicBroadcastGradientArgs()(shape_of_x1, shape_of_x2)
sum_r1 = sum_grad_reduce_axis(rrx1, rx)
sum_r2 = sum_grad_reduce_axis(rrx2, ry)
brrx1 = reshape_(sum_r1, shape_of_x1)
brrx2 = reshape_(sum_r2, shape_of_x2)
brrx1 = F.cast(brrx1, x1_dtype)
brrx2 = F.cast(brrx2, x2_dtype)
return brrx1, brrx2
return bprop
@bprop_getters.register(Fmax)
def get_bprop_fmax(self):
"""Grad definition for 'Fmax' operation"""
shape_ = P.Shape()
masked_fill_op = P.MaskedFill()
logical_or_op = P.LogicalOr()
logical_not_op = P.LogicalNot()
logical_and_op = P.LogicalAnd()
mul_op = P.Mul()
is_nan_op = P.IsNan()
reshape_ = P.Reshape()
def bprop(x1, x2, out, dout):
x1_dtype = F.dtype(x1)
x2_dtype = F.dtype(x2)
if x1_dtype != mstype.float32:
x1 = F.cast(x1, mstype.float32)
dout = F.cast(dout, mstype.float32)
if x2_dtype != mstype.float32:
x2 = F.cast(x2, mstype.float32)
dout = F.cast(dout, mstype.float32)
b1 = logical_or_op(logical_and_op((x1 >= x2), logical_not_op(is_nan_op(x1))), is_nan_op(x2))
b2 = logical_or_op(logical_and_op(x2 > x1, logical_not_op(is_nan_op(x2))),
logical_and_op(is_nan_op(x1), logical_not_op(is_nan_op(x2))))
rx1 = masked_fill_op(x1, b1, 1.)
rx1 = masked_fill_op(rx1, logical_not_op(b1), 0.)
rx2 = masked_fill_op(x2, b2, 1.)
rx2 = masked_fill_op(rx2, logical_not_op(b2), 0.)
rrx1 = mul_op(rx1, dout)
rrx2 = mul_op(rx2, dout)
shape_of_x1 = shape_(x1)
shape_of_x2 = shape_(x2)
x1_dim = len(shape_of_x1)
x2_dim = len(shape_of_x2)
if x1_dim == 0 and x2_dim != 0:
sum_r1 = rrx1.sum()
sum_r2 = rrx2
elif x1_dim == 0 and x2_dim == 0:
sum_r1 = rrx1.sum()
sum_r2 = rrx2.sum()
elif x1_dim != 0 and x2_dim == 0:
sum_r2 = rrx2.sum()
sum_r1 = rrx1
else:
rx, ry = DynamicBroadcastGradientArgs()(shape_of_x1, shape_of_x2)
sum_r1 = sum_grad_reduce_axis(rrx1, rx)
sum_r2 = sum_grad_reduce_axis(rrx2, ry)
brrx1 = reshape_(sum_r1, shape_of_x1)
brrx2 = reshape_(sum_r2, shape_of_x2)
brrx1 = F.cast(brrx1, x1_dtype)
brrx2 = F.cast(brrx2, x2_dtype)
return brrx1, brrx2
return bprop
@bprop_getters.register(TridiagonalSolve)
def get_bprop_tridiagonalsolve(self):
"""Grad definition for 'TridiagonalSolve' operation"""
tridiagonalsolve = TridiagonalSolve()
def bprop(diagonals, rhs, out, dout):
diags = diagonals
diag1 = diags[..., 1, :]
zeros1 = P.Zeros()(diags.shape[:-2] + (1,), diags.dtype)
superdiag1 = P.Concat(-1)((diags[..., 2, 1:], zeros1))
subdiag1 = P.Concat(-1)((zeros1, diags[..., 0, :-1]))
diags_transposed = P.Stack(-2)([superdiag1, diag1, subdiag1])
grad_rhs = tridiagonalsolve(diags_transposed, dout)
diag2 = P.ReduceSum()(grad_rhs * out, -1)
zeros2 = P.Zeros()(grad_rhs.shape[:-2] + (1, grad_rhs.shape[-1]), grad_rhs.dtype)
superdiag2 = P.ReduceSum()(grad_rhs * P.Concat(-2)((out[..., 1:, :], zeros2)), -1)
subdiag2 = P.ReduceSum()(grad_rhs * P.Concat(-2)((zeros2, out[..., :-1, :])), -1)
a = (P.Stack(-2)([superdiag2, diag2, subdiag2]))
grad_diags = 0 - a
return grad_diags, grad_rhs
return bprop
@bprop_getters.register(Lgamma)
def get_bprop_lgamma(self):
"""Grad definition for `Lgamma` operation."""
digamma = Digamma()
def bprop(x, out, dout):
if x.dtype in (mstype.float16,):
x = F.cast(x, mstype.float32)
dx = dout * digamma(x)
dx = F.cast(dx, mstype.float16)
elif x.dtype in (mstype.int32,):
x = F.cast(x, mstype.float32)
dx = dout * digamma(x)
else:
dx = dout * digamma(x)
return (dx,)
return bprop
@bprop_getters.register(Digamma)
def get_bprop_digamma(self):
"""Grad definition for `Digamma` operation."""
polygamma = Polygamma()
a = Tensor(1)
def bprop(x, out, dout):
if x.dtype in (mstype.float16,):
x = F.cast(x, mstype.float32)
dx = dout * polygamma(a, x)
dx = F.cast(dx, mstype.float16)
else:
dx = dout * polygamma(a, x)
return (dx,)
return bprop
@bprop_getters.register(Polygamma)
def get_bprop_polygamma(self):
"""Grad definition for `Polygamma` operation."""
polygamma = Polygamma()
def bprop(a, x, out, dout):
one = Tensor(1)
a = a + one
if x.dtype in (mstype.float16,):
x = F.cast(x, mstype.float64)
dx = dout * polygamma(a, x)
dx = F.cast(dx, mstype.float16)
else:
dx = dout * polygamma(a, x)
return zeros_like(a), dx
return bprop
@bprop_getters.register(CholeskySolve)
def get_bprop_cholesky_solve(self):
"""Grad definition for 'CholeskySolve' operation"""
batchmatmul_op = P.BatchMatMul()
matmul_op = P.MatMul()
neg_op = P.Neg()
upper = self.upper
cholesky_solve = CholeskySolve(upper=self.upper)
rank = P.Rank()
def bprop(x1, x2, out, dout):
flag = 0
len_x1 = rank(x1)
if dout.dtype == mstype.float64:
flag = 1
x2 = F.cast(x2, mstype.float32)
out = F.cast(out, mstype.float32)
dout = F.cast(dout, mstype.float32)
dx1 = cholesky_solve(dout, x2)
if len_x1 == 2:
common_term = matmul_op(dx1, transpose(out, (1, 0)))
common_term = common_term + transpose(common_term, (1, 0))
if upper is True:
dx2 = neg_op(matmul_op(x2, common_term))
else:
dx2 = neg_op(matmul_op(common_term, x2))
else:
x2_dim_size = len(shape_op(x2))
x2_dim_order = list(range(x2_dim_size))
target_order = x2_dim_order[:-2] + x2_dim_order[-2:][::-1]
common_term = batchmatmul_op(dx1, transpose(out, tuple(target_order)))
common_term = common_term + transpose(common_term, tuple(target_order))
if upper is True:
dx2 = neg_op(batchmatmul_op(x2, common_term))
else:
dx2 = neg_op(batchmatmul_op(common_term, x2))
if flag == 1:
dx1 = F.cast(dx1, mstype.float64)
dx2 = F.cast(dx2, mstype.float64)
return dx1, dx2
return bprop
@bprop_getters.register(Diagonal)
def get_bprop_diagonal(self):
"""Grad definition for 'Diagonal' operation"""
offset = self.offset
dim1 = self.dim1
dim2 = self.dim2
zeros_op = P.FillV2()
size_op = P.Size()
transpose_op = Transpose()
matrix_set_diag_op = MatrixSetDiagV3(align="LEFT_RIGHT")
def bprop(x, out, dout):
x_shape = x.shape
x_dtype = x.dtype
x_dim = len(x_shape)
if dim1 < 0:
dim1_ = dim1 + x_dim
else:
dim1_ = dim1
if dim2 < 0:
dim2_ = dim2 + x_dim
else:
dim2_ = dim2
if size_op(out):
batch_dim = out.shape[:-1]
diag_plane = (x_shape[dim1_], x_shape[dim2_])
dx_trans_shape = batch_dim + diag_plane
value = Tensor(0, x_dtype)
dx = zeros_op(dx_trans_shape, value)
k = F.cast(offset, mstype.int32)
dx = matrix_set_diag_op(dx, dout, k)
dim = 0
perm = ()
for i in range(x_dim):
if i == dim1_:
perm = perm + (x_dim - 2,)
elif i == dim2_:
perm = perm + (x_dim - 1,)
else:
perm = perm + (dim,)
dim = dim + 1
dx = transpose_op(dx, perm)
else:
dx = zeros_like(x)
return (dx,)
return bprop
@bprop_getters.register(Cholesky)
def get_bprop_cholesky(self):
"""Grad definition for `Cholesky` operation."""
upper = self.upper
choleskygrad = G.CholeskyGrad()
def bprop(x, out, dout):
out = cholesky_transpose(out) if upper else out
dout = cholesky_transpose(dout) if upper else dout
dx = choleskygrad(out, dout)
return (dx,)
return bprop
@bprop_getters.register(InplaceIndexAdd)
def get_bprop_inplace_index_add(self):
"""Generate bprop for InplaceIndexAdd"""
gather = P.Gather()
_axis = self.axis
def bprop(var, indices, updates, out, dout):
return dout, zeros_like(indices), gather(dout, indices, _axis)
return bprop
@bprop_getters.register(P.Zeta)
def get_bprop_zeta(self):
"""Generate bprop for Zeta"""
zeta = P.Zeta()
def bprop(x, q, out, dout):
dq = -x * zeta(x + 1, q) * dout
return zeros_like(x), dq
return bprop
@_primexpr
def _fft_rank_offset(norm_shape, rank):
"""generate offset for fft with rank"""
norm_shape_product = 1
for i in norm_shape[-rank:]:
norm_shape_product *= i
return norm_shape_product
@_primexpr
def _fft_with_size_back_norm(norm_shape, norm, inverse, rank):
"""generate reverse term for fft_with_size"""
if inverse is False:
if norm == "forward":
norm_ = 1 / _fft_rank_offset(norm_shape, rank)
if norm == "backward":
norm_ = 1 * _fft_rank_offset(norm_shape, rank)
if norm == "ortho":
norm_ = 1
if inverse is True:
if norm == "forward":
norm_ = 1 * _fft_rank_offset(norm_shape, rank)
if norm == "backward":
norm_ = 1 / _fft_rank_offset(norm_shape, rank)
if norm == "ortho":
norm_ = 1
return norm_
@_primexpr
def _rfft_norm(norm_shape, norm, rank):
"""generate norm for rfft"""
norm_ = 1.0
if norm == "forward":
norm_ = 1 / _fft_rank_offset(norm_shape, rank)
if norm == "backward":
norm_ = 1
if norm == "ortho":
norm_ = 1 / np.sqrt(_fft_rank_offset(norm_shape, rank))
return norm_
@_primexpr
def _get_last_dim_slice_shape(tensor_shape, index):
"""generate shape for slice last tensor"""
from_shape = [0 for x in tensor_shape]
if index < 0:
from_shape[-1] = tensor_shape[-1] + index
else:
from_shape[-1] = index
to_shape = list(tensor_shape)
to_shape[-1] = 1
return tuple(from_shape), tuple(to_shape)
@_primexpr
def _rfft_reshape(shape_a, shape_b):
"""generate rfft shape for reshape"""
new_shape = list(shape_b)
for i in range(len(shape_a) - 2):
new_shape.insert(i, 1)
return tuple(new_shape)
@_primexpr
def _rfft_tile_reshape(shape_a):
"""generate rfft shape for tile"""
reshape_a = list(shape_a)
reshape_a[-2] = 1
reshape_a[-1] = 1
return tuple(reshape_a)
@_primexpr
def _rfft_last_term_shape(shape_a, shape_b):
"""generate rfft shape for last term"""
new_shape = list(shape_b)
for i in range(len(shape_a) - 1):
new_shape.insert(i, 1)
return tuple(new_shape)
@_primexpr
def _batch_matmul_shape_increase(shape_before):
"""increase tensor shape for batch_matmul"""
return (1, *shape_before)
@_primexpr
def _batch_matmul_shape_decrease(matrix_shape):
"""decrease tensor shape after batch_matmul"""
shape_tmp = list(matrix_shape)
shape_tmp[-1] = 1
return tuple(shape_tmp)
@bprop_getters.register(FFTWithSize)
def get_bprop_fft_with_size(self):
"""Grad definition for `FFTWithSize` operation."""
signal_ndim = self.signal_ndim
inverse = self.inverse
real = self.real
norm = self.norm
onesided = self.onesided
fft_fn = FFTWithSize(signal_ndim=signal_ndim,
inverse=False,
real=False,
norm=norm)
ifft_fn = FFTWithSize(signal_ndim=signal_ndim,
inverse=True,
real=False,
norm=norm)
rfft_fn = FFTWithSize(signal_ndim=signal_ndim,
inverse=False,
real=True,
norm=norm,
onesided=onesided)
irfft_fn = FFTWithSize(signal_ndim=signal_ndim,
inverse=True,
real=True,
norm=norm,
onesided=onesided)
complex_op = P.Complex()
to_tensor_op = P.ScalarToTensor()
type_op = P.DType()
concat_op = P.Concat()
ones_op = P.Ones()
zeros_op = P.Zeros()
real_op = P.Real()
imag_op = P.Imag()
slice_op = P.Slice()
tile_op = P.Tile()
expand_dims = P.ExpandDims()
transpose_op = P.Transpose()
exp_op = P.Exp()
reshape_op = P.Reshape()
conj_op = P.Conj()
batch_matmul_op = P.BatchMatMul()
def bprop(x, out, dout):
dx = 0
input_type = type_op(x)
output_type = type_op(out)
input_shape = shape_op(x)
offset_shape = shape_op(x)
dout_shape = shape_op(dout)
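        # reverse-normalization factor that undoes the scaling applied by the chosen norm and direction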
offset_size = to_tensor_op(_fft_with_size_back_norm(offset_shape, norm, inverse, signal_ndim), output_type)
if real is False:
if inverse is False:
dx = ifft_fn(dout) * offset_size
else:
dx = fft_fn(dout) * offset_size
else:
irfft_ = FFTWithSize(signal_ndim=1, inverse=True, real=True, norm="backward", onesided=onesided,
signal_sizes=offset_shape[-1:])
irfft2d_ = FFTWithSize(signal_ndim=2, inverse=True, real=True, norm="backward", onesided=onesided,
signal_sizes=offset_shape[-2:])
irfft3d_ = FFTWithSize(signal_ndim=3, inverse=True, real=True, norm="backward", onesided=onesided,
signal_sizes=offset_shape[-3:])
if inverse is False:
if onesided is True:
terms = 0
is_even = to_tensor_op(1 - (input_shape[-1] % 2), input_type)
dout_first_from, dout_first_to = _get_last_dim_slice_shape(dout_shape, 0)
dout_first = slice_op(dout, dout_first_from, dout_first_to)
rfft_offset_size = to_tensor_op(_fft_rank_offset(input_shape, signal_ndim), input_type)
rfft_norm_offset = to_tensor_op(_rfft_norm(input_shape, norm, signal_ndim), input_type)
dout_last_from, dout_last_to = _get_last_dim_slice_shape(dout_shape, -1)
dout_last = slice_op(dout, dout_last_from, dout_last_to)
if signal_ndim == 1:
dx = irfft_(dout)
vec_mask = complex_op(1 - 2 * (mnp.arange(0, input_shape[-1], 1, input_type) % 2),
zeros_op(input_shape[-1], input_type))
terms = real_op(dout_first) + is_even * real_op(dout_last * vec_mask)
elif signal_ndim == 2:
dx = irfft2d_(dout)
arange_inner = mnp.arange(0, input_shape[-2], 1, input_type)
matrix_a = tile_op(expand_dims(arange_inner, 0), (input_shape[-2], 1))
matrix_b = transpose_op(matrix_a, (1, 0))
matrix_mul = matrix_a * matrix_b
imag_offset = complex_op(to_tensor_op(0, input_type), to_tensor_op(-2, input_type))
pi_tensor = to_tensor_op(mnp.pi, output_type)
matrix_mul_complex = complex_op(matrix_mul, zeros_op(shape_op(matrix_mul), input_type))
matrix_base_mask = exp_op(imag_offset * pi_tensor * matrix_mul_complex /
to_tensor_op(input_shape[-2], output_type))
expanded_matrix_mask = reshape_op(matrix_base_mask, _rfft_reshape(shape_op(dout_first),
shape_op(matrix_base_mask)))
tile_matrix_mask = complex_op(tile_op(real_op(expanded_matrix_mask), _rfft_tile_reshape(
shape_op(dout_first))), tile_op(imag_op(expanded_matrix_mask),
_rfft_tile_reshape(shape_op(dout_first))))
tile_matrix_mask_shape = shape_op(tile_matrix_mask)
dout_first_term = reshape_op(batch_matmul_op(reshape_op(tile_matrix_mask,
_batch_matmul_shape_increase(
tile_matrix_mask_shape)),
reshape_op(conj_op(
dout_first), _batch_matmul_shape_increase(
shape_op(dout_first)))),
_batch_matmul_shape_decrease(tile_matrix_mask_shape))
dout_last_term = reshape_op(batch_matmul_op(reshape_op(tile_matrix_mask,
_batch_matmul_shape_increase(
tile_matrix_mask_shape)),
reshape_op(conj_op(dout_last),
_batch_matmul_shape_increase(
shape_op(dout_last)))),
_batch_matmul_shape_decrease(
tile_matrix_mask_shape))
vec_mask = complex_op(1 - 2 * (mnp.arange(0, input_shape[-1], 1, input_type) % 2), zeros_op(
input_shape[-1], input_type))
dout_last_term = complex_op(tile_op(real_op(dout_last_term), _rfft_last_term_shape(dout_shape,
[input_shape[
-1]])),
tile_op(imag_op(dout_last_term), _rfft_last_term_shape(
dout_shape, [input_shape[-1]])))
dout_last_term = dout_last_term * vec_mask
terms = real_op(dout_first_term) + is_even * real_op(dout_last_term)
elif signal_ndim == 3:
dx = irfft3d_(dout) * real_op(offset_size)
dx = to_tensor_op(0.5, input_type) * (dx * rfft_offset_size + terms) * rfft_norm_offset
else:
dx = irfft_fn(dout) * real_op(offset_size)
else:
dx = rfft_fn(dout)
if onesided is True:
if signal_ndim != 3:
is_odd = dout_shape[-1] % 2
last_shape = offset_shape[-1]
mask = concat_op((ones_op(1, output_type), 2.0 * ones_op(
(last_shape - 2 + is_odd,), output_type), ones_op((1 - is_odd,), output_type)))
dx = dx * complex_op(mask, zeros_op(shape_op(mask), output_type))
irfft_offset_size = to_tensor_op(
_fft_with_size_back_norm(shape_op(dout), norm, inverse, signal_ndim),
output_type)
dx = dx * complex_op(irfft_offset_size, zeros_op(1, output_type))
else:
dx = dx * complex_op(offset_size, zeros_op(1, output_type))
else:
dx = dx * complex_op(offset_size, zeros_op(1, output_type))
return (dx,)
return bprop
def dyn_binop_grad_common(x, y, dx, dy):
"""
Common grad definition for binary operations when the input is dynamic shape.
The function is usually used in backprop op to reduce additional dimensions created by broadcasting.
"""
shape_of_x = shape_op(x)
shape_of_y = shape_op(y)
rx, ry = DynamicBroadcastGradientArgs()(shape_of_x, shape_of_y)
dx_origin_dtype = dx.dtype
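    # integer gradients are temporarily cast to float32 for the reduction (integer reduce is not supported on some backends), then cast back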
if dx_origin_dtype in (mstype.int16, mstype.int32, mstype.int64):
dx = F.cast(dx, mstype.float32)
dx = sum_grad_reduce_axis(dx, rx)
dx = F.cast(dx, dx_origin_dtype)
else:
dx = sum_grad_reduce_axis(dx, rx)
dy_origin_dtype = dy.dtype
if dy_origin_dtype in (mstype.int16, mstype.int32, mstype.int64):
dy = F.cast(dy, mstype.float32)
dy = sum_grad_reduce_axis(dy, ry)
dy = F.cast(dy, dy_origin_dtype)
else:
dy = sum_grad_reduce_axis(dy, ry)
reduce_dx = reshape(dx, shape_of_x)
reduce_dy = reshape(dy, shape_of_y)
return reduce_dx, reduce_dy
def dyn_binop_grad_common_with_shift(x, y, dx, dy, shift):
"""
Common grad definition for binary operations with shift when the input is dynamic shape.
The function is usually used in backprop op to reduce additional dimensions created by broadcasting.
"""
shape_of_x = shape_op(x)
shape_of_y = shape_op(y)
broadcast_shape_of_x = shape_of_x[:-shift]
broadcast_shape_of_y = shape_of_y[:-shift]
rx, ry = DynamicBroadcastGradientArgs()(broadcast_shape_of_x, broadcast_shape_of_y)
dx = sum_grad_reduce_axis(dx, rx)
dy = sum_grad_reduce_axis(dy, ry)
reduce_dx = reshape(dx, shape_of_x)
reduce_dy = reshape(dy, shape_of_y)
return reduce_dx, reduce_dy
def _reduce_sum_with_cast(dx, axis):
dx_origin_dtype = dx.dtype
    # Currently, on Ascend and GPU, reduce_sum does not support int16, int32 or int64 inputs.
if dx_origin_dtype in (mstype.int16, mstype.int32, mstype.int64):
dx = F.cast(dx, mstype.float32)
dx = reduce_sum(dx, axis)
return F.cast(dx, dx_origin_dtype)
return reduce_sum(dx, axis)
def binop_grad_common(x, y, dx, dy):
"""
Common grad definition for binary operations.
The function is usually used in backprop op to reduce additional dimensions created by broadcasting.
"""
shape_of_x = shape_op(x)
shape_of_y = shape_op(y)
# if input shape is the same as dout shape, do not need to reduce
reduce_dx = dx
reduce_dy = dy
if not (F.is_sequence_value_unknown(shape_of_x) or F.is_sequence_value_unknown(shape_of_y)):
rx = broadcast_gradient_args(shape_of_x, shape_of_y)
if rx[0]:
# if dx is scalar whose shape is (), do not need reduce
if shape_op(dx):
dx = _reduce_sum_with_cast(dx, rx[0])
reduce_dx = reshape(dx, shape_of_x)
if rx[1]:
# if dy is scalar whose shape is (), do not need reduce
if shape_op(dy):
dy = _reduce_sum_with_cast(dy, rx[1])
reduce_dy = reshape(dy, shape_of_y)
return reduce_dx, reduce_dy
if not isinstance(shape_of_x, tuple) or not isinstance(shape_of_y, tuple):
# x or y is scalar
if not isinstance(shape_of_x, tuple):
reduce_dx = _reduce_sum_with_cast(dx, ())
if not isinstance(shape_of_y, tuple):
reduce_dy = _reduce_sum_with_cast(dy, ())
return reduce_dx, reduce_dy
return dyn_binop_grad_common(x, y, dx, dy)
def binop_grad_common_with_shift(x, y, dx, dy, shift):
"""
Common grad definition for binary operations with shift.
The function is usually used in backprop op to reduce additional dimensions created by broadcasting.
"""
shape_of_x = shape_op(x)
shape_of_y = shape_op(y)
broadcast_shape_of_x = shape_of_x[:-shift]
broadcast_shape_of_y = shape_of_y[:-shift]
# if input shape is the same as dout shape, do not need to reduce
reduce_dx = dx
reduce_dy = dy
if not (F.is_sequence_value_unknown(broadcast_shape_of_x) or F.is_sequence_value_unknown(broadcast_shape_of_y)):
rx = broadcast_gradient_args(broadcast_shape_of_x, broadcast_shape_of_y)
if rx[0]:
# if dx is scalar whose shape is (), do not need reduce
if shape_op(dx):
dx = _reduce_sum_with_cast(dx, rx[0])
reduce_dx = reshape(dx, shape_of_x)
if rx[1]:
# if dy is scalar whose shape is (), do not need reduce
if shape_op(dy):
dy = _reduce_sum_with_cast(dy, rx[1])
reduce_dy = reshape(dy, shape_of_y)
return reduce_dx, reduce_dy
if not isinstance(shape_of_x, tuple) or not isinstance(shape_of_y, tuple):
# x or y is scalar
if not isinstance(shape_of_x, tuple):
reduce_dx = _reduce_sum_with_cast(dx, ())
if not isinstance(shape_of_y, tuple):
reduce_dy = _reduce_sum_with_cast(dy, ())
return reduce_dx, reduce_dy
return dyn_binop_grad_common_with_shift(x, y, dx, dy, shift)
@bprop_getters.register(P.TensorAdd)
def get_bprop_tensor_add(self):
"""Grad definition for `Add` operation."""
def bprop(x, y, out, dout):
return binop_grad_common(x, y, dout, dout)
return bprop
@bprop_getters.register(P.BitwiseAnd)
def get_bprop_bitwiseand(self):
"""Grad definition for `BitwiseAnd` operation."""
def bprop(x, y, out, dout):
return zeros_like(x), zeros_like(y)
return bprop
@bprop_getters.register(P.BitwiseOr)
def get_bprop_bitwiseor(self):
"""Grad definition for `BitwiseOr` operation."""
def bprop(x, y, out, dout):
return zeros_like(x), zeros_like(y)
return bprop
@bprop_getters.register(P.BitwiseXor)
def get_bprop_bitwisexor(self):
"""Grad definition for `BitwiseXor` operation."""
def bprop(x, y, out, dout):
return zeros_like(x), zeros_like(y)
return bprop
@bprop_getters.register(P.InplaceUpdate)
def get_bprop_inplace_update(self):
"""Grad definition for `InplaceUpdate` operation."""
def bprop(x, v, out, dout):
return zeros_like(x), zeros_like(v)
return bprop
@bprop_getters.register(P.InplaceUpdateV2)
def get_bprop_inplace_update_v2(self):
"""Grad definition for `InplaceUpdateV2` operation."""
def bprop(x, indices, v, out, dout):
return zeros_like(x), zeros_like(indices), zeros_like(v)
return bprop
@bprop_getters.register(P.InplaceSub)
def get_bprop_inplace_sub(self):
"""Grad definition for `InplaceSub` operation."""
def bprop(x, input_v, out, dout):
return zeros_like(x), zeros_like(input_v)
return bprop
@bprop_getters.register(P.InplaceAdd)
def get_bprop_inplace_add(self):
"""Grad definition for `InplaceAdd` operation."""
def bprop(x, input_v, out, dout):
return zeros_like(x), zeros_like(input_v)
return bprop
|
cc2aab0fec1224f20f5bdfd1102542ceab656d51
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/third_party/incubator-tvm/vta/python/vta/top/nnvm_op.py
|
a38b2172671bba292818a6a242760a04f5684148
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,057
|
py
|
nnvm_op.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for supporting packed_conv2d + ewise variant of nnvm."""
from __future__ import absolute_import as _abs
import logging
import tvm
import topi
from nnvm.top import registry as reg, OpPattern
from nnvm.top import nn as _nn
from .vta_conv2d import is_packed_layout
from ..environment import get_env
@tvm.register_func("nnvm.compiler.build_target", override=True)
def _build(funcs, target, target_host):
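    """Build override: VTA targets build against ext_dev; rasp/vtacpu targets build against target_host."""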
tvm_t = tvm.target.create(target)
if tvm_t.device_name == "vta":
return tvm.build(funcs, target="ext_dev", target_host=target_host)
if tvm_t.device_name == "rasp" or tvm_t.device_name == "vtacpu":
return tvm.build(funcs, target=target_host)
return tvm.build(funcs, target=target)
@tvm.register_func("nnvm.compiler.lower", override=True)
def _lower(sch, inputs, func_name, graph):
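    """Lower override that logs the graph IR for quantized conv2d and attaches it to compilation errors."""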
import traceback
# pylint: disable=broad-except
try:
f = tvm.lower(sch, inputs, name=func_name)
if "quantized_conv2d" in func_name:
logging.info(graph.ir(join_entry_attrs=["shape"]))
except Exception:
msg = traceback.format_exc()
msg += "Error during compile graph\n"
msg += "--------------------------\n"
msg += graph.ir(join_entry_attrs=["shape"])
raise RuntimeError(msg)
return f if isinstance(
f, (tvm.container.Array, tuple, list)) else [f]
# override to force partition at copy
reg.register_pattern("copy", OpPattern.INJECTIVE, level=15)
@reg.register_compute("clip", level=15)
def compute_clip(attrs, inputs, _):
""" Clip operator. """
x = inputs[0]
a_min = attrs.get_float("a_min")
a_max = attrs.get_float("a_max")
const_min = tvm.const(a_min, x.dtype)
const_max = tvm.const(a_max, x.dtype)
with tvm.tag_scope(topi.tag.ELEMWISE):
x = tvm.compute(
x.shape, lambda *i: tvm.min(x(*i), const_max), name="clipA")
x = tvm.compute(
x.shape, lambda *i: tvm.max(x(*i), const_min), name="clipB")
return x
@reg.register_compute("conv2d", level=15)
def compute_conv2d(attrs, inputs, out):
""" Compute definition of conv2d """
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs["layout"]
out_dtype = attrs['out_dtype']
    assert dilation == (1, 1), "dilation is not supported yet"
if is_packed_layout(layout):
if groups == 1:
assert groups == 1
env = get_env()
            assert env.LOG_INP_WIDTH == 3, "only 8-bit input is supported for now"
            assert env.LOG_OUT_WIDTH == 3, "only 8-bit output is supported for now"
inputs = list(inputs)
assert inputs[1].dtype == "int8"
return topi.nn.conv2d(inputs[0], inputs[1], strides,
padding, dilation, layout, out_dtype)
return topi.nn.group_conv2d_nchw(inputs[0], inputs[1], strides,
padding, dilation, groups, out_dtype)
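    # non-packed layouts fall back to the generic ARM CPU conv2d compute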
with tvm.target.arm_cpu(tvm.target.current_target().model):
return _nn.compute_conv2d(attrs, inputs, out)
@reg.register_schedule("conv2d", level=15)
def schedule_conv2d(attrs, outs, target):
""" Schedule definition of conv2d """
layout = attrs["layout"]
groups = attrs.get_int('groups')
if is_packed_layout(layout):
target = tvm.target.create(target)
if target.device_name == "vta":
if groups == 1:
return topi.generic.schedule_conv2d_nchw(outs)
return topi.generic.schedule_group_conv2d_nchw(outs)
elif str(target).startswith("llvm"):
return tvm.create_schedule([x.op for x in outs])
else:
raise RuntimeError("not support target %s" % target)
with tvm.target.arm_cpu(tvm.target.current_target().model):
return _nn.schedule_conv2d(attrs, outs, tvm.target.current_target())
@reg.register_alter_op_layout("conv2d", level=15)
def alter_conv2d_layout(attrs, inputs, out):
layout = attrs['layout']
if is_packed_layout(layout):
return None
with tvm.target.arm_cpu(tvm.target.current_target().model):
return _nn.alter_conv2d_layout(attrs, inputs, out)
|
36a0812029814b3dfd8f4831df19ff3f16a2315c
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowCryptoGdoiDetail/cli/equal/golden_output_expected.py
|
fb53b18d21310c127c88be8aff5ca4bf537bee4b
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,954
|
py
|
golden_output_expected.py
|
expected_output={
"group_name":{
"getvpn1":{
"group_information":{
"crypto_path":"ipv4",
"group_identity":"1223",
"group_member":{
"3.3.1.1":{
"active_tek_num":1,
"allowable_rekey_cipher":"ESP",
"attempted_registration_count":1,
"dp_error_monitoring":"OFF",
"fail_close_revert":"Disabled",
"fvrf":"None",
"ipsec_init_reg_executed":0,
"ipsec_init_reg_postponed":0,
"ivrf":"None",
"last_rekey_seq_num":0,
"last_rekey_server":"1.1.1.1",
"local_addr":"3.3.1.1",
"local_addr_port":"848",
"pfs_rekey_received":0,
"re_register_time_sec":332,
"registration":"Registered",
"rekey_acks_sent":25,
"remote_addr":"1.1.1.1",
"remote_addr_port":848,
"sa_track":"disabled",
"server_ip":"1.1.1.1",
"succeeded_registration_count":1,
"uncicast_rekey_received":25,
"version":"1.0.26",
"vrf":"None"
}
},
"group_member_information":{
"acl_download_from_ks":{
"1.1.1.1":{
"acl_list":[
"access-list deny ip host 11.23.33.33 host 24.54.55.55",
"access-list deny ip host 41.23.32.37 host 44.58.59.55",
"access-list deny esp any any",
"access-list deny udp any any port = 3784",
"access-list deny udp any any port = 3785",
"access-list deny udp any port = 3785 any",
"access-list deny tcp any any port = 179",
"access-list deny tcp any port = 179 any",
"access-list deny tcp any any port = 22",
"access-list deny tcp any port = 22 any",
"access-list deny ospf any any",
"access-list deny pim any 224.0.0.0 0.0.0.255",
"access-list deny udp any any port = 123",
"access-list deny udp any any port = 514",
"access-list deny udp any port = 500 any port = 500",
"access-list deny udp any port = 848 any",
"access-list deny udp any any port = 848",
"access-list deny ip any 10.90.0.0 0.0.255.255",
"access-list deny ip 10.90.0.0 0.0.255.255 any",
"access-list permit ip 25.25.0.0 0.0.255.255 15.15.0.0 0.0.255.255",
"access-list permit ip 15.15.0.0 0.0.255.255 25.25.0.0 0.0.255.255",
"access-list permit ip 16.16.0.0 0.0.255.255 26.26.0.0 0.0.255.255",
]
}
},
"acl_received_from_ks":"gdoi_group_getvpn1_temp_acl",
"rekeys_cumulative":{
"after_latest_register":25,
"rekey_acks_sents":25,
"total_received":25
}
},
"group_server_list":"1.1.1.1",
"group_type":"GDOI (ISAKMP)",
"ipsec_sa_direction":"Both",
"kek_policy":{
"encrypt_algorithm":"AES",
"key_size":256,
"lifetime":992,
"rekey_transport_type":"Unicast",
"sig_hash_algorithm":"HMAC_AUTH_SHA",
"sig_key_length":4400
},
"key_management_path":"ipv4",
"kgs_policy":{
"reg_gm":{
"local_addr":"3.3.1.1"
}
},
"p2p_policy":{
"reg_gm":{
"local_addr":"3.3.1.1"
}
},
"rekeys_received":25,
"tek_policy":{
"interfaces":{
"GigabitEthernet0/0/1":{
"ipsec_sa":{
"spi":{
"0x5A69F51E(1516893470)":{
"alg_key_size_bytes":32,
"sig_key_size_bytes":32,
"anti_replay_count":64,
"encaps":"ENCAPS_TUNNEL",
"sa_remaining_key_lifetime":393,
"tag_method":"disabled",
"transform":"esp-256-aes esp-sha256-hmac"
},
}
}
}
}
}
}
}
}
}
|
0780573dac9315b31cb97e46af743f406bfc748d
|
8bc2bfc34352aac01bef774209e651e81c61c681
|
/pox/openflow/flow_table.py
|
4bbf79321bab029c68299bb14ac6f099a970435d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
noxrepo/pox
|
0925260bc723cda49dd0dab7e827f61548e1370c
|
5f82461e01f8822bd7336603b361bff4ffbd2380
|
refs/heads/gar-experimental
| 2023-04-30T00:04:33.991794
| 2020-05-20T12:00:13
| 2020-05-20T12:00:13
| 3,382,021
| 467
| 370
|
Apache-2.0
| 2023-06-01T02:19:33
| 2012-02-07T22:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 12,049
|
py
|
flow_table.py
|
# Copyright 2011,2012,2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of an OpenFlow flow table
"""
from .libopenflow_01 import *
from pox.lib.revent import *
import time
import math
# FlowTable Entries:
# match - ofp_match (13-tuple)
# counters - hash from name -> count. May be stale
# actions - ordered list of ofp_action_*s to apply for matching packets
class TableEntry (object):
"""
Models a flow table entry, with a match, actions, and options/flags/counters.
  Note: The current time can either be specified explicitly with the optional
  'now' parameter or is taken from time.time().
"""
def __init__ (self, priority=OFP_DEFAULT_PRIORITY, cookie=0, idle_timeout=0,
hard_timeout=0, flags=0, match=ofp_match(), actions=[],
buffer_id=None, now=None):
"""
Initialize table entry
"""
if now is None: now = time.time()
self.created = now
self.last_touched = self.created
self.byte_count = 0
self.packet_count = 0
self.priority = priority
self.cookie = cookie
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.flags = flags
self.match = match
self.actions = actions
self.buffer_id = buffer_id
@staticmethod
def from_flow_mod (flow_mod):
return TableEntry(priority=flow_mod.priority,
cookie=flow_mod.cookie,
idle_timeout=flow_mod.idle_timeout,
hard_timeout=flow_mod.hard_timeout,
flags=flow_mod.flags,
match=flow_mod.match,
actions=flow_mod.actions,
buffer_id=flow_mod.buffer_id)
def to_flow_mod (self, flags=None, **kw):
if flags is None: flags = self.flags
return ofp_flow_mod(priority=self.priority,
cookie=self.cookie,
match=self.match,
idle_timeout=self.idle_timeout,
hard_timeout=self.hard_timeout,
actions=self.actions,
buffer_id=self.buffer_id,
flags=flags, **kw)
@property
def effective_priority (self):
"""
Exact matches effectively have an "infinite" priority
"""
return self.priority if self.match.is_wildcarded else (1<<16) + 1
def is_matched_by (self, match, priority=None, strict=False, out_port=None):
"""
Tests whether a given match object matches this entry
Used for, e.g., flow_mod updates
    If out_port is any value besides None, the flow entry must contain an
    output action to the specified port.
"""
match_a = lambda a: isinstance(a, ofp_action_output) and a.port == out_port
port_matches = (out_port is None) or any(match_a(a) for a in self.actions)
if strict:
return port_matches and self.match == match and self.priority == priority
else:
return port_matches and match.matches_with_wildcards(self.match)
def touch_packet (self, byte_count, now=None):
"""
Updates information of this entry based on encountering a packet.
    Updates the cumulative byte and packet counts and refreshes the idle
    expiration timer.
"""
if now is None: now = time.time()
self.byte_count += byte_count
self.packet_count += 1
self.last_touched = now
def is_idle_timed_out (self, now=None):
if now is None: now = time.time()
if self.idle_timeout > 0:
if (now - self.last_touched) > self.idle_timeout:
return True
return False
def is_hard_timed_out (self, now=None):
if now is None: now = time.time()
if self.hard_timeout > 0:
if (now - self.created) > self.hard_timeout:
return True
return False
def is_expired (self, now=None):
"""
Tests whether this flow entry is expired due to its idle or hard timeout
"""
if now is None: now = time.time()
return self.is_idle_timed_out(now) or self.is_hard_timed_out(now)
def __str__ (self):
return type(self).__name__ + "\n " + self.show()
def __repr__ (self):
return "TableEntry(" + self.show() + ")"
def show (self):
outstr = ''
outstr += "priority=%s, " % self.priority
outstr += "cookie=%x, " % self.cookie
outstr += "idle_timeout=%d, " % self.idle_timeout
outstr += "hard_timeout=%d, " % self.hard_timeout
outstr += "match=<%s>, " % ((self.match.show().replace("\n"," ").strip()
if self.match else "Empty"),)
outstr += "actions=%s, " % (self.actions,)
outstr += "buffer_id=%s" % str(self.buffer_id)
return outstr
def flow_stats (self, now=None):
if now is None: now = time.time()
dur_nsec,dur_sec = math.modf(now - self.created)
return ofp_flow_stats(match=self.match,
duration_sec=int(dur_sec),
duration_nsec=int(dur_nsec * 1e9),
priority=self.priority,
idle_timeout=self.idle_timeout,
hard_timeout=self.hard_timeout,
cookie=self.cookie,
packet_count=self.packet_count,
byte_count=self.byte_count,
actions=self.actions)
def to_flow_removed (self, now=None, reason=None):
#TODO: Rename flow_stats to to_flow_stats and refactor?
if now is None: now = time.time()
dur_nsec,dur_sec = math.modf(now - self.created)
fr = ofp_flow_removed()
fr.match = self.match
fr.cookie = self.cookie
fr.priority = self.priority
fr.reason = reason
fr.duration_sec = int(dur_sec)
fr.duration_nsec = int(dur_nsec * 1e9)
fr.idle_timeout = self.idle_timeout
fr.hard_timeout = self.hard_timeout
fr.packet_count = self.packet_count
fr.byte_count = self.byte_count
return fr
class FlowTableModification (Event):
def __init__ (self, added=[], removed=[], reason=None):
self.added = added
self.removed = removed
# Reason for modification.
# Presently, this is only used for removals and is either one of OFPRR_x,
# or None if it does not correlate to any of the items in the spec.
self.reason = reason
class FlowTable (EventMixin):
"""
General model of a flow table.
Maintains an ordered list of flow entries, and finds matching entries for
packets and other entries. Supports expiration of flows.
"""
_eventMixin_events = set([FlowTableModification])
def __init__ (self):
EventMixin.__init__(self)
# Table is a list of TableEntry sorted by descending effective_priority.
self._table = []
def _dirty (self):
"""
Call when table changes
"""
pass
@property
def entries (self):
return self._table
def __len__ (self):
return len(self._table)
def add_entry (self, entry):
assert isinstance(entry, TableEntry)
#self._table.append(entry)
#self._table.sort(key=lambda e: e.effective_priority, reverse=True)
# Use binary search to insert at correct place
# This is faster even for modest table sizes, and way, way faster
# as the tables grow larger.
priority = entry.effective_priority
table = self._table
low = 0
high = len(table)
while low < high:
middle = (low + high) // 2
if priority >= table[middle].effective_priority:
high = middle
continue
low = middle + 1
table.insert(low, entry)
self._dirty()
self.raiseEvent(FlowTableModification(added=[entry]))
def remove_entry (self, entry, reason=None):
assert isinstance(entry, TableEntry)
self._table.remove(entry)
self._dirty()
self.raiseEvent(FlowTableModification(removed=[entry], reason=reason))
def matching_entries (self, match, priority=0, strict=False, out_port=None):
entry_match = lambda e: e.is_matched_by(match, priority, strict, out_port)
return [ entry for entry in self._table if entry_match(entry) ]
def flow_stats (self, match, out_port=None, now=None):
mc_es = self.matching_entries(match=match, strict=False, out_port=out_port)
return [ e.flow_stats(now) for e in mc_es ]
def aggregate_stats (self, match, out_port=None):
mc_es = self.matching_entries(match=match, strict=False, out_port=out_port)
packet_count = 0
byte_count = 0
flow_count = 0
for entry in mc_es:
packet_count += entry.packet_count
byte_count += entry.byte_count
flow_count += 1
return ofp_aggregate_stats(packet_count=packet_count,
byte_count=byte_count,
flow_count=flow_count)
def _remove_specific_entries (self, flows, reason=None):
#for entry in flows:
# self._table.remove(entry)
#self._table = [entry for entry in self._table if entry not in flows]
if not flows: return
self._dirty()
remove_flows = set(flows)
i = 0
while i < len(self._table):
entry = self._table[i]
if entry in remove_flows:
del self._table[i]
remove_flows.remove(entry)
if not remove_flows: break
else:
i += 1
assert len(remove_flows) == 0
self.raiseEvent(FlowTableModification(removed=flows, reason=reason))
def remove_expired_entries (self, now=None):
idle = []
hard = []
if now is None: now = time.time()
for entry in self._table:
if entry.is_idle_timed_out(now):
idle.append(entry)
elif entry.is_hard_timed_out(now):
hard.append(entry)
self._remove_specific_entries(idle, OFPRR_IDLE_TIMEOUT)
self._remove_specific_entries(hard, OFPRR_HARD_TIMEOUT)
def remove_matching_entries (self, match, priority=0, strict=False,
out_port=None, reason=None):
remove_flows = self.matching_entries(match, priority, strict, out_port)
self._remove_specific_entries(remove_flows, reason=reason)
return remove_flows
def entry_for_packet (self, packet, in_port):
"""
Finds the flow table entry that matches the given packet.
Returns the highest priority flow table entry that matches the given packet
on the given in_port, or None if no matching entry is found.
"""
packet_match = ofp_match.from_packet(packet, in_port, spec_frags = True)
for entry in self._table:
if entry.match.matches_with_wildcards(packet_match,
consider_other_wildcards=False):
return entry
return None
def check_for_overlapping_entry (self, in_entry):
"""
Tests if the input entry overlaps with another entry in this table.
Returns true if there is an overlap, false otherwise. Since the table is
sorted, there is only a need to check a certain portion of it.
"""
#NOTE: Assumes that entries are sorted by decreasing effective_priority
#NOTE: Ambiguous whether matching should be based on effective_priority
# or the regular priority. Doing it based on effective_priority
# since that's what actually affects packet matching.
#NOTE: We could improve performance by doing a binary search to find the
# right priority entries.
priority = in_entry.effective_priority
for e in self._table:
if e.effective_priority < priority:
break
elif e.effective_priority > priority:
continue
else:
if e.is_matched_by(in_entry.match) or in_entry.is_matched_by(e.match):
return True
return False
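# --- Illustrative usage sketch (added for clarity; not part of the original
# POX module). It assumes TableEntry, FlowTable, ofp_match and
# ofp_action_output are in scope from this module's imports, that TableEntry
# accepts the keyword arguments shown, and that the field values are purely
# illustrative.
if __name__ == '__main__':
  table = FlowTable()
  entry = TableEntry(priority=100, idle_timeout=30,
                     match=ofp_match(dl_type=0x0800, nw_dst="10.0.0.1"),
                     actions=[ofp_action_output(port=1)])
  table.add_entry(entry)
  # Non-strict lookup: entries matched by an all-wildcard match that also
  # forward out of port 1.
  hits = table.matching_entries(ofp_match(), out_port=1)
  assert entry in hits
  # Drop anything whose idle or hard timeout has elapsed.
  table.remove_expired_entries()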
|
5caaf723395cada56b5355750684fe6b9541d9a9
|
97efdfac795c44f9a2a62e48ba71bf1783c523bf
|
/message_ix/tests/util/test_tutorial.py
|
78c877f63a29e81bcea2b8ae5034d40d28bf8d2f
|
[
"CC-BY-4.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
iiasa/message_ix
|
1e96ee254b6f67117faf4fb78ef8fd75d9ee0a6c
|
bf4469111a2d10c5dbc2c921e6a7e502e96aea34
|
refs/heads/main
| 2023-08-17T20:36:23.055462
| 2023-08-14T08:46:24
| 2023-08-14T08:46:24
| 113,015,968
| 103
| 162
|
Apache-2.0
| 2023-09-08T11:56:21
| 2017-12-04T08:29:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
test_tutorial.py
|
from ixmp.reporting import Key
from message_ix import Scenario
from message_ix.testing import SCENARIO
from message_ix.util import make_df
from message_ix.util.tutorial import prepare_plots, solve_modified
def test_prepare_plots(dantzig_reporter):
# Function runs without error
prepare_plots(dantzig_reporter)
# Plot keys are added; contains a task with 2 elements
(func, key) = dantzig_reporter.graph["plot new capacity"]
# First element is a callable partial object with certain keywords
assert callable(func)
assert func.keywords == dict(
dims=("nl", "t", "yv"),
units="GWa",
title="Energy System New Capacity",
cf=1.0,
stacked=True,
)
# Second element is a key
assert Key("CAP_NEW", ["nl", "t", "yv"]) == key
def test_solve_modified(caplog, message_test_mp):
base = Scenario(message_test_mp, **SCENARIO["dantzig"])
# Base objective value
base.solve(quiet=True)
base_obj = base.var("OBJ")["lvl"]
with solve_modified(base, "new scenario name") as scenario:
# Scenario is not yet solved
assert not scenario.has_solution()
# Scenario has the indicated new name
assert "new scenario name" == scenario.scenario
# Change one demand parameter from 325 to 326
scenario.add_par(
"demand",
make_df(
"demand",
node=["new-york"],
commodity=["cases"],
level="consumption",
year=1963,
time="year",
value=326,
unit="case",
),
)
# Scenario is solved at the end of the with: statement
assert scenario.has_solution()
assert scenario.var("OBJ")["lvl"] != base_obj
|
e0bd674973ff78666e7224357e33561e6a5c46bb
|
639437d2d764971680f4c9cf9cf2108b2898b245
|
/sdk/datahub-python-sdk/setup.py
|
e31365acc7288d664372ba3969f6396a2ba8298d
|
[
"MIT"
] |
permissive
|
macacajs/macaca-datahub
|
b06df95e1a6dccba84e09783de248ea3cb8025cb
|
cc22e536de55c7b4ba7958f19a190c8f6e2d53de
|
refs/heads/master
| 2023-08-27T11:45:35.474410
| 2022-12-16T08:40:19
| 2022-12-16T08:40:19
| 111,818,989
| 546
| 71
|
MIT
| 2023-09-11T04:07:47
| 2017-11-23T14:28:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
setup.py
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='datahub-sdk',
version='1.0.4',
description='Macaca DataHub Client',
long_description=long_description,
url='https://github.com/macacajs/datahub-python-sdk',
author='xdf',
author_email='xudafeng@126.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords=[
'data',
'management',
'continues'
],
packages=find_packages(exclude=['tests*', 'docs']),
install_requires=[
'enum34',
'requests',
'retrying'
],
extras_require={
'test': ['pytest', 'tox', 'pytest-xdist', 'pytest-cov', 'coverage', 'responses']
}
)
|
6a6b3cd0f6df1d4633887e620c44a9d104062393
|
fae16a539b7c1b0525aab40ddaeee3e451fc9b74
|
/src/argilla/tasks/users/create.py
|
62bfa31eae333d08873b7bbf3c83341029985dc8
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
argilla-io/argilla
|
a6b45f7f64e9db82f6d9a61771d758ffbb3dab4a
|
7c1b2368b444b7b7a281d37ad51bcb2d8e92acf5
|
refs/heads/develop
| 2023-09-04T03:58:05.914619
| 2023-09-01T15:58:31
| 2023-09-01T15:58:31
| 362,500,938
| 1,085
| 122
|
Apache-2.0
| 2023-09-14T15:31:07
| 2021-04-28T14:37:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,724
|
py
|
create.py
|
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import typer
from argilla.client.sdk.users.models import UserRole
def create_user(
username: str = typer.Option(..., prompt=True, help="The username of the user to be created"),
password: str = typer.Option(..., prompt=True, help="The password of the user to be created"),
first_name: Optional[str] = typer.Option(None, help="The first name of the user to be created"),
last_name: Optional[str] = typer.Option(None, help="The last name of the user to be created"),
role: UserRole = typer.Option(UserRole.annotator, help="The role of the user to be created"),
workspaces: Optional[List[str]] = typer.Option(
None,
"--workspace",
help="A workspace name to which the user will be linked to. This option can be provided several times.",
),
) -> None:
from rich.console import Console
from rich.markdown import Markdown
from argilla.client.users import User
from argilla.tasks.rich import get_argilla_themed_panel
try:
user = User.create(
username=username,
password=password,
first_name=first_name,
last_name=last_name,
role=role,
workspaces=workspaces,
)
except KeyError as e:
typer.echo(f"User with '{username}' already exists!")
raise typer.Exit(code=1) from e
except ValueError as e:
typer.echo(f"Provided parameters are not valid:\n\n{e}")
raise typer.Exit(code=1) from e
except RuntimeError as e:
typer.echo("An unexpected error occurred when trying to create the user")
raise typer.Exit(code=1) from e
panel = get_argilla_themed_panel(
Markdown(
f"- **Username**: {user.username}\n"
f"- **Role**: {user.role}\n"
f"- **First name**: {user.first_name}\n"
f"- **Last name**: {user.last_name}\n"
f"- **API Key**: {user.api_key}\n"
f"- **Workspaces**: {user.workspaces}"
),
title="User created",
title_align="left",
)
Console().print(panel)
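# --- Illustrative sketch (added for clarity; not part of the original module).
# The app and command names below are assumptions for illustration only; they
# show how a Typer command function like `create_user` is typically registered
# and invoked, not necessarily how Argilla wires it up:
#
#   import typer
#
#   app = typer.Typer()
#   app.command(name="create")(create_user)
#
#   # e.g. on the command line:
#   #   <cli> create --username bob --role annotator --workspace my-workspace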
|
7e33a15bcd10fe34f653928a84bd04fb86c03036
|
554ac81a19409420c076fed11616e259526e8e0d
|
/Chapter02/chapter2_5.py
|
43d4fc2b1e9373a152fb795a1111adb56a4ba4f4
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-Python-Networking-Third-Edition
|
9fc013efb932a2172a99a363e9f50fa824321e81
|
0c1a6ea557710f3b12cdb310dd9cb4ecca2dbce3
|
refs/heads/master
| 2022-09-13T22:04:08.292000
| 2021-12-07T23:50:04
| 2021-12-07T23:50:04
| 209,299,242
| 116
| 95
|
MIT
| 2022-09-03T12:15:06
| 2019-09-18T12:09:31
|
Python
|
UTF-8
|
Python
| false
| false
| 650
|
py
|
chapter2_5.py
|
#!/usr/bin/env python
from nornir import InitNornir
from nornir.plugins.tasks.networking import netmiko_send_command
from nornir.plugins.functions.text import print_result
# If you are using Nornir3, there are some changes:
# https://github.com/twin-bridges/nornir_course/blob/master/nornir3_changes.md
# Please pip install: pip install nornir_utils nornir_netmiko
# Then replace the 2 nornir.plugin imports with below:
#from nornir_utils.plugins.functions import print_result
#from nornir_netmiko import netmiko_send_command
nr = InitNornir()
result = nr.run(
task=netmiko_send_command,
command_string="show arp"
)
print_result(result)
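# --- Illustrative sketch (added for clarity; not part of the original script).
# The Nornir 3 variant described in the comments above would look roughly like
# this (assuming `pip install nornir nornir_utils nornir_netmiko` and the same
# inventory/configuration files as the original example):
#
# from nornir import InitNornir
# from nornir_utils.plugins.functions import print_result
# from nornir_netmiko import netmiko_send_command
#
# nr = InitNornir()
# result = nr.run(task=netmiko_send_command, command_string="show arp")
# print_result(result)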
|
38fe572b3f90d553386d1fd0a4245b66c1b3ad41
|
38bd31e53bdc4d8ed8a06544931ad9c3db5b6f5b
|
/slowfast/visualization/gradcam_utils.py
|
07c0fa28f257a2c8202ec98a3556b327293f80cd
|
[
"Apache-2.0"
] |
permissive
|
facebookresearch/SlowFast
|
1ee6f666d20e5adab8e86949649bcaf4bf6cd820
|
6092dad7be32bb1d6b71fe1bade258dc8b492fe3
|
refs/heads/main
| 2023-09-03T12:15:35.478138
| 2023-08-26T20:55:56
| 2023-08-26T20:55:56
| 203,465,734
| 6,221
| 1,325
|
Apache-2.0
| 2023-09-12T23:51:28
| 2019-08-20T22:47:26
|
Python
|
UTF-8
|
Python
| false
| false
| 6,958
|
py
|
gradcam_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import slowfast.datasets.utils as data_utils
from slowfast.visualization.utils import get_layer
class GradCAM:
"""
GradCAM class helps create localization maps using the Grad-CAM method for input videos
and overlap the maps over the input videos as heatmaps.
https://arxiv.org/pdf/1610.02391.pdf
"""
def __init__(
self, model, target_layers, data_mean, data_std, colormap="viridis"
):
"""
Args:
model (model): the model to be used.
target_layers (list of str(s)): name of convolutional layer to be used to get
gradients and feature maps from for creating localization maps.
data_mean (tensor or list): mean value to add to input videos.
data_std (tensor or list): std to multiply for input videos.
colormap (Optional[str]): matplotlib colormap used to create heatmap.
See https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html
"""
self.model = model
# Run in eval mode.
self.model.eval()
self.target_layers = target_layers
self.gradients = {}
self.activations = {}
self.colormap = plt.get_cmap(colormap)
self.data_mean = data_mean
self.data_std = data_std
self._register_hooks()
def _register_single_hook(self, layer_name):
"""
Register forward and backward hook to a layer, given layer_name,
to obtain gradients and activations.
Args:
layer_name (str): name of the layer.
"""
def get_gradients(module, grad_input, grad_output):
self.gradients[layer_name] = grad_output[0].detach()
def get_activations(module, input, output):
self.activations[layer_name] = output.clone().detach()
target_layer = get_layer(self.model, layer_name=layer_name)
target_layer.register_forward_hook(get_activations)
target_layer.register_backward_hook(get_gradients)
def _register_hooks(self):
"""
Register hooks to layers in `self.target_layers`.
"""
for layer_name in self.target_layers:
self._register_single_hook(layer_name=layer_name)
def _calculate_localization_map(self, inputs, labels=None):
"""
Calculate localization map for all inputs with Grad-CAM.
Args:
inputs (list of tensor(s)): the input clips.
labels (Optional[tensor]): labels of the current input clips.
Returns:
localization_maps (list of ndarray(s)): the localization map for
each corresponding input.
preds (tensor): shape (n_instances, n_class). Model predictions for `inputs`.
"""
assert len(inputs) == len(
self.target_layers
), "Must register the same number of target layers as the number of input pathways."
input_clone = [inp.clone() for inp in inputs]
preds = self.model(input_clone)
if labels is None:
score = torch.max(preds, dim=-1)[0]
else:
if labels.ndim == 1:
labels = labels.unsqueeze(-1)
score = torch.gather(preds, dim=1, index=labels)
self.model.zero_grad()
score = torch.sum(score)
score.backward()
localization_maps = []
for i, inp in enumerate(inputs):
_, _, T, H, W = inp.size()
gradients = self.gradients[self.target_layers[i]]
activations = self.activations[self.target_layers[i]]
B, C, Tg, _, _ = gradients.size()
weights = torch.mean(gradients.view(B, C, Tg, -1), dim=3)
weights = weights.view(B, C, Tg, 1, 1)
localization_map = torch.sum(
weights * activations, dim=1, keepdim=True
)
localization_map = F.relu(localization_map)
localization_map = F.interpolate(
localization_map,
size=(T, H, W),
mode="trilinear",
align_corners=False,
)
localization_map_min, localization_map_max = (
torch.min(localization_map.view(B, -1), dim=-1, keepdim=True)[
0
],
torch.max(localization_map.view(B, -1), dim=-1, keepdim=True)[
0
],
)
localization_map_min = torch.reshape(
localization_map_min, shape=(B, 1, 1, 1, 1)
)
localization_map_max = torch.reshape(
localization_map_max, shape=(B, 1, 1, 1, 1)
)
# Normalize the localization map.
localization_map = (localization_map - localization_map_min) / (
localization_map_max - localization_map_min + 1e-6
)
localization_map = localization_map.data
localization_maps.append(localization_map)
return localization_maps, preds
def __call__(self, inputs, labels=None, alpha=0.5):
"""
Visualize the localization maps on their corresponding inputs as heatmap,
using Grad-CAM.
Args:
inputs (list of tensor(s)): the input clips.
labels (Optional[tensor]): labels of the current input clips.
alpha (float): transparency level of the heatmap, in the range [0, 1].
Returns:
result_ls (list of tensor(s)): the visualized inputs.
preds (tensor): shape (n_instances, n_class). Model predictions for `inputs`.
"""
result_ls = []
localization_maps, preds = self._calculate_localization_map(
inputs, labels=labels
)
for i, localization_map in enumerate(localization_maps):
# Convert (B, 1, T, H, W) to (B, T, H, W)
localization_map = localization_map.squeeze(dim=1)
if localization_map.device != torch.device("cpu"):
localization_map = localization_map.cpu()
heatmap = self.colormap(localization_map)
heatmap = heatmap[:, :, :, :, :3]
# Permute input from (B, C, T, H, W) to (B, T, H, W, C)
curr_inp = inputs[i].permute(0, 2, 3, 4, 1)
if curr_inp.device != torch.device("cpu"):
curr_inp = curr_inp.cpu()
curr_inp = data_utils.revert_tensor_normalize(
curr_inp, self.data_mean, self.data_std
)
heatmap = torch.from_numpy(heatmap)
curr_inp = alpha * heatmap + (1 - alpha) * curr_inp
# Permute inp to (B, T, C, H, W)
curr_inp = curr_inp.permute(0, 1, 4, 2, 3)
result_ls.append(curr_inp)
return result_ls, preds
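# --- Illustrative usage sketch (added for clarity; not part of the original
# file). The model, clip tensors and layer names below are placeholders; the
# actual target layer names depend on the architecture being visualized:
#
#   gradcam = GradCAM(
#       model,
#       target_layers=["s5/pathway0_res2", "s5/pathway1_res2"],
#       data_mean=[0.45, 0.45, 0.45],
#       data_std=[0.225, 0.225, 0.225],
#   )
#   # `inputs` is the usual list of pathway clips, each of shape (B, C, T, H, W).
#   heatmap_clips, preds = gradcam(inputs, labels=None, alpha=0.5)
#   # Each heatmap_clips[i] has shape (B, T, C, H, W), ready for visualization.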
|
1696cf5da822390ce62b7d95d00b4afbcd5d5742
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/0656. Coin Path/0656-2.py
|
c27a233e1398157b042f28e1dc29c2cf332e3a67
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 800
|
py
|
0656-2.py
|
import math
from typing import List
class Solution:
def cheapestJump(self, coins: List[int], maxJump: int) -> List[int]:
if coins[-1] == -1:
return []
n = len(coins)
# dp[i] := min cost to jump to n - 1 from i
dp = [math.inf] * n
next = [-1] * n
dp[-1] = coins[-1]
for i in reversed(range(n - 1)):
if coins[i] == -1:
continue
for j in range(i + 1, min(i + maxJump + 1, n)):
if dp[j] == math.inf:
continue
cost = coins[i] + dp[j]
if cost < dp[i]:
dp[i] = cost
next[i] = j
if dp[0] == math.inf:
return []
return self._constructPath(next, 0)
def _constructPath(self, next: List[int], i: int) -> List[int]:
ans = []
while i != -1:
ans.append(i + 1) # 1-indexed
i = next[i]
return ans
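# --- Illustrative usage sketch (added for clarity; not part of the original
# solution file). On the classic LeetCode 656 example the cheapest path costs
# 1 + 4 + 2 = 7 and visits the 1-indexed positions [1, 3, 5].
if __name__ == '__main__':
  print(Solution().cheapestJump([1, 2, 4, -1, 2], 2))  # [1, 3, 5]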
|
ce8b979197410ff276f8acf95b8defe6fb216b2f
|
8f267fe1157904023004aa1fcee8cdcaf1d69f74
|
/tempest/tests/lib/services/identity/v3/test_domain_configuration_client.py
|
72e5bd2d5d577bf690f7f51fda95fa59f5a0567f
|
[
"Apache-2.0"
] |
permissive
|
openstack/tempest
|
a65737f3e62d4ebeb7e387feac7bcc636d3f5fe0
|
3932a799e620a20d7abf7b89e21b520683a1809b
|
refs/heads/master
| 2023-08-28T15:04:21.241805
| 2023-08-28T10:16:57
| 2023-08-28T10:16:57
| 2,356,406
| 270
| 407
|
Apache-2.0
| 2022-06-29T15:52:45
| 2011-09-09T15:56:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,295
|
py
|
test_domain_configuration_client.py
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.lib.services.identity.v3 import domain_configuration_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestDomainConfigurationClient(base.BaseServiceTest):
FAKE_CONFIG_SETTINGS = {
"config": {
"identity": {
"driver": "ldap"
},
"ldap": {
"url": "ldap://localhost",
"user": "",
"suffix": "cn=example,cn=com",
}
}
}
FAKE_DOMAIN_ID = '07ef7d04-2941-4bee-8551-f79f08a021de'
def setUp(self):
super(TestDomainConfigurationClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = domain_configuration_client.DomainConfigurationClient(
fake_auth, 'identity', 'regionOne')
def _test_show_default_config_settings(self, bytes_body=False):
self.check_service_client_function(
self.client.show_default_config_settings,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_CONFIG_SETTINGS,
bytes_body)
def _test_show_default_group_config(self, bytes_body=False):
self.check_service_client_function(
self.client.show_default_group_config,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_CONFIG_SETTINGS['config']['ldap'],
bytes_body,
group='ldap')
def _test_show_default_group_option(self, bytes_body=False):
self.check_service_client_function(
self.client.show_default_group_option,
'tempest.lib.common.rest_client.RestClient.get',
{'driver': 'ldap'},
bytes_body,
group='identity',
option='driver')
def _test_show_domain_group_option_config(self, bytes_body=False):
self.check_service_client_function(
self.client.show_domain_group_option_config,
'tempest.lib.common.rest_client.RestClient.get',
{'driver': 'ldap'},
bytes_body,
domain_id=self.FAKE_DOMAIN_ID,
group='identity',
option='driver')
def _test_update_domain_group_option_config(self, bytes_body=False):
self.check_service_client_function(
self.client.update_domain_group_option_config,
'tempest.lib.common.rest_client.RestClient.patch',
self.FAKE_CONFIG_SETTINGS,
bytes_body,
domain_id=self.FAKE_DOMAIN_ID,
group='identity',
option='driver',
url='http://myldap/my_other_root')
def _test_show_domain_group_config(self, bytes_body=False):
self.check_service_client_function(
self.client.show_domain_group_config,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_CONFIG_SETTINGS['config']['ldap'],
bytes_body,
domain_id=self.FAKE_DOMAIN_ID,
group='ldap')
def _test_update_domain_group_config(self, bytes_body=False):
self.check_service_client_function(
self.client.update_domain_group_config,
'tempest.lib.common.rest_client.RestClient.patch',
self.FAKE_CONFIG_SETTINGS['config']['ldap'],
bytes_body,
domain_id=self.FAKE_DOMAIN_ID,
group='ldap',
**self.FAKE_CONFIG_SETTINGS['config'])
def _test_create_domain_config(self, bytes_body=False):
self.check_service_client_function(
self.client.create_domain_config,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_CONFIG_SETTINGS,
bytes_body,
domain_id=self.FAKE_DOMAIN_ID,
status=201)
def _test_show_domain_config(self, bytes_body=False):
self.check_service_client_function(
self.client.show_domain_config,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_CONFIG_SETTINGS,
bytes_body,
domain_id=self.FAKE_DOMAIN_ID)
def _test_update_domain_config(self, bytes_body=False):
self.check_service_client_function(
self.client.update_domain_config,
'tempest.lib.common.rest_client.RestClient.patch',
self.FAKE_CONFIG_SETTINGS,
bytes_body,
domain_id=self.FAKE_DOMAIN_ID)
def test_show_default_config_settings_with_str_body(self):
self._test_show_default_config_settings()
def test_show_default_config_settings_with_bytes_body(self):
self._test_show_default_config_settings(bytes_body=True)
def test_show_default_group_config_with_str_body(self):
self._test_show_default_group_config()
def test_show_default_group_config_with_bytes_body(self):
self._test_show_default_group_config(bytes_body=True)
def test_show_default_group_option_with_str_body(self):
self._test_show_default_group_option()
def test_show_default_group_option_with_bytes_body(self):
self._test_show_default_group_option(bytes_body=True)
def test_show_domain_group_option_config_with_str_body(self):
self._test_show_domain_group_option_config()
def test_show_domain_group_option_config_with_bytes_body(self):
self._test_show_domain_group_option_config(bytes_body=True)
def test_update_domain_group_option_config_with_str_body(self):
self._test_update_domain_group_option_config()
def test_update_domain_group_option_config_with_bytes_body(self):
self._test_update_domain_group_option_config(bytes_body=True)
def test_delete_domain_group_option_config(self):
self.check_service_client_function(
self.client.delete_domain_group_option_config,
'tempest.lib.common.rest_client.RestClient.delete',
{},
status=204,
domain_id=self.FAKE_DOMAIN_ID,
group='identity',
option='driver')
def test_show_domain_group_config_with_str_body(self):
self._test_show_domain_group_config()
def test_show_domain_group_config_with_bytes_body(self):
self._test_show_domain_group_config(bytes_body=True)
    def test_update_domain_group_config_with_str_body(self):
self._test_update_domain_group_config()
def test_update_domain_group_config_with_bytes_body(self):
self._test_update_domain_group_config(bytes_body=True)
def test_delete_domain_group_config(self):
self.check_service_client_function(
self.client.delete_domain_group_config,
'tempest.lib.common.rest_client.RestClient.delete',
{},
status=204,
domain_id=self.FAKE_DOMAIN_ID,
group='identity')
def test_create_domain_config_with_str_body(self):
self._test_create_domain_config()
def test_create_domain_config_with_bytes_body(self):
self._test_create_domain_config(bytes_body=True)
def test_show_domain_config_with_str_body(self):
self._test_show_domain_config()
def test_show_domain_config_with_bytes_body(self):
self._test_show_domain_config(bytes_body=True)
def test_update_domain_config_with_str_body(self):
self._test_update_domain_config()
def test_update_domain_config_with_bytes_body(self):
self._test_update_domain_config(bytes_body=True)
def test_delete_domain_config(self):
self.check_service_client_function(
self.client.delete_domain_config,
'tempest.lib.common.rest_client.RestClient.delete',
{},
status=204,
domain_id=self.FAKE_DOMAIN_ID)
|
35c5042ad1b7bf766e8b3b62cd67e9fa02d49f27
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/Box/Integrations/BoxEventsCollector/BoxEventsCollector.py
|
74545b12dc425eafb7123ad8cf0cbf5a2b8d59c3
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,962
|
py
|
BoxEventsCollector.py
|
# pylint: disable=no-name-in-module
# pylint: disable=no-self-argument
import dateparser
import secrets
import jwt
import urllib3
from cryptography import exceptions
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from pydantic import Field, parse_obj_as
from SiemApiModule import * # noqa: E402
urllib3.disable_warnings()
class Claims(BaseModel):
iss: str = Field(alias='client_id')
sub: str = Field(alias='id', description='user id or enterprise id')
box_sub_type = 'enterprise'
aud: AnyUrl
jti: str = secrets.token_hex(64)
exp: int = round(time.time()) + 45
class AppAuth(BaseModel):
publicKeyID: str
privateKey: str
passphrase: str
class BoxAppSettings(BaseModel):
clientID: str
clientSecret: str
appAuth: AppAuth
class Config:
arbitrary_types_allowed = True
class BoxCredentials(BaseModel):
enterpriseID: str
boxAppSettings: BoxAppSettings
class MyConfig:
validate_assignment = True
def get_box_events_timestamp_format(value):
"""Converting int(epoch), str(3 days) or datetime to Box's api time"""
timestamp: Optional[datetime]
if isinstance(value, int):
value = str(value)
if not isinstance(value, datetime):
timestamp = dateparser.parse(value)
if timestamp is None:
raise TypeError(f'after is not a valid time {value}')
return timestamp.isoformat('T', 'seconds')
class BoxEventsParams(BaseModel):
event_type: Optional[str] = None
limit: int = Field(500, alias='page_size', gt=0, le=500)
stream_position: Optional[str]
stream_type = 'admin_logs'
created_after: Optional[str]
# validators
_normalize_after = validator('created_after', pre=True, allow_reuse=True)(
get_box_events_timestamp_format
)
class Config:
validate_assignment = True
def not_gate(v):
"""Due to a bug in the validator object (collision with CommonServerPython)
we can pass this a a simple lambda. So here we are.
Just doing not if v is bool, else it is true.
Used when getting insecure and should change the insecure to verify.
insecure == True, verify is False.
"""
v_ = parse_obj_as(bool, False if v is None else v)
return not v_
class BoxEventsRequestConfig(IntegrationHTTPRequest):
# Endpoint: https://developer.box.com/reference/get-events/
method = Method.GET
params: BoxEventsParams # type: ignore[assignment]
verify: Optional[bool] = Field(True, alias='insecure') # type: ignore[assignment]
# validators
_oppsite_verify = validator('verify', allow_reuse=True)(not_gate)
class BoxIntegrationOptions(IntegrationOptions):
product_name = 'box'
vendor_name = 'box'
should_push_events: bool = False
class BoxEventsClient(IntegrationEventsClient):
request: BoxEventsRequestConfig
options: IntegrationOptions
def __init__(
self,
request: BoxEventsRequestConfig,
options: IntegrationOptions,
box_credentials: BoxCredentials,
api_url: str,
session: Optional[requests.Session] = None,
) -> None:
self.api_url: str = api_url
self.authorization_url = parse_obj_as(AnyUrl, urljoin(str(self.api_url), '/oauth2/token'))
if session is None:
session = requests.Session()
self.box_credentials = box_credentials
super().__init__(request, options, session)
def set_request_filter(self, after: str):
self.request.params.stream_position = after
def authenticate(self):
request = IntegrationHTTPRequest(
method=Method.POST,
url=self.authorization_url,
data=self._create_authorization_body(),
verify=self.request.verify,
)
response = self.call(request)
self.access_token = response.json()['access_token']
self.request.headers = {'Authorization': f'Bearer {self.access_token}'}
def _create_authorization_body(self):
claims = Claims(
client_id=self.box_credentials.boxAppSettings.clientID,
id=self.box_credentials.enterpriseID,
aud=self.authorization_url,
)
decrypted_private_key = _decrypt_private_key(
self.box_credentials.boxAppSettings.appAuth
)
assertion = jwt.encode(
payload=claims.dict(),
key=decrypted_private_key,
algorithm='RS512',
headers={
'kid': self.box_credentials.boxAppSettings.appAuth.publicKeyID
},
)
body = {
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': assertion,
'client_id': self.box_credentials.boxAppSettings.clientID,
'client_secret': self.box_credentials.boxAppSettings.clientSecret,
}
return body
class BoxEventsGetter(IntegrationGetEvents):
client: BoxEventsClient
def get_last_run(self: Any) -> dict: # type: ignore
demisto.debug(f'getting {self.client.request.params.stream_position=}')
return {'stream_position': self.client.request.params.stream_position}
def _iter_events(self):
self.client.authenticate()
demisto.debug('authenticated successfully')
# region First Call
events = self.client.call(self.client.request).json()
# endregion
# region Yield Response
while True: # Run as long there are logs
self.client.set_request_filter(events['next_stream_position'])
# The next stream position points to where new messages will arrive.
demisto.debug(
f'setting the next request filter {events["next_stream_position"]=}'
)
if not events['entries']:
break
yield events['entries']
# endregion
# region Do next call
events = self.client.call(self.client.request).json()
# endregion
def _decrypt_private_key(app_auth: AppAuth):
"""
Attempts to load the private key as given in the integration configuration.
:return: an initialized Private key object.
"""
try:
key = load_pem_private_key(
data=app_auth.privateKey.encode('utf8'),
password=app_auth.passphrase.encode('utf8'),
backend=default_backend(),
)
except (
TypeError,
ValueError,
exceptions.UnsupportedAlgorithm,
) as exception:
raise DemistoException(
'An error occurred while loading the private key.', exception
)
return key
def main(command: str, demisto_params: dict):
try:
box_credentials = BoxCredentials.parse_raw(
demisto_params['credentials_json']['password']
)
events_request_params = demisto_params.copy()
events_request_params['url'] = urljoin(demisto_params.get('url', 'https://api.box.com'), '/2.0/events')
request = BoxEventsRequestConfig(
params=BoxEventsParams.parse_obj(events_request_params),
**events_request_params,
)
options = BoxIntegrationOptions.parse_obj(demisto_params)
client = BoxEventsClient(request, options, box_credentials,
api_url=demisto_params.get('url', 'https://api.box.com'))
get_events = BoxEventsGetter(client, options)
if command == 'test-module':
get_events.client.request.params.limit = 1
get_events.run()
demisto.results('ok')
return
demisto.debug('not in test module, running box-get-events')
events = get_events.run()
demisto.debug(f'got {len(events)=} from api')
if command == 'box-get-events':
demisto.debug('box-get-events, publishing events to incident')
return_results(CommandResults('BoxEvents', 'event_id', events))
if options.should_push_events:
send_events_to_xsiam(events, options.vendor_name, options.product_name)
if command == 'fetch-events':
last_run = get_events.get_last_run()
demisto.debug(
f'in fetch-events. settings should push events to true, setting {last_run=}'
)
send_events_to_xsiam(events, options.vendor_name, options.product_name)
demisto.setLastRun(last_run)
demisto.debug(f'finished fetching events. {options.should_push_events=}')
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{e}\nTraceback:{traceback.format_exc()}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
    # Args take precedence over params, and getLastRun takes precedence over both.
demisto_params_ = demisto.params() | demisto.args() | demisto.getLastRun()
main(demisto.command(), demisto_params_)
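# --- Illustrative sketch (added for clarity; not part of the original
# integration). Based on the BoxCredentials/BoxAppSettings/AppAuth models
# above, the `credentials_json` parameter is expected to parse into a
# structure shaped like this (all values are placeholders):
#
#   {
#     "enterpriseID": "123456",
#     "boxAppSettings": {
#       "clientID": "<client id>",
#       "clientSecret": "<client secret>",
#       "appAuth": {
#         "publicKeyID": "<public key id>",
#         "privateKey": "-----BEGIN ENCRYPTED PRIVATE KEY-----...",
#         "passphrase": "<passphrase>"
#       }
#     }
#   }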
|
770236195638bb038d129832eaf715b3be275c07
|
e4e7461d012360b32779a851f8734c0a18289bf1
|
/logic/leaderboard.py
|
37cd4b82d95764f55e58a4d7ed7ccf5ccf4da598
|
[
"MIT"
] |
permissive
|
Yelp/love
|
3650755da9e6cae20b1a0ea98d580daad0aab6b6
|
f9f1c47616c247c20110f60a876049d9908d0e03
|
refs/heads/master
| 2023-09-04T09:53:46.720336
| 2021-11-08T19:10:51
| 2021-11-08T19:10:51
| 55,083,938
| 165
| 41
|
MIT
| 2023-05-01T20:16:11
| 2016-03-30T17:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
leaderboard.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from datetime import timedelta
from logic import TIMESPAN_LAST_WEEK
from logic import to_the_future
from logic import utc_week_limits
import logic.love_count
def get_leaderboard_data(timespan, department, office=None):
# If last week, we need to subtract *before* getting the week limits to
# avoid being off by one hour on weeks that include a DST transition
utc_now = datetime.utcnow()
if timespan == TIMESPAN_LAST_WEEK:
utc_now -= timedelta(days=7)
utc_week_start, _ = utc_week_limits(utc_now)
top_lovers, top_lovees = logic.love_count.top_lovers_and_lovees(
utc_week_start,
dept=department,
office=office,
)
top_lover_dicts = [
{
'employee': employee_key.get_async(),
'num_sent': sent_count
}
for employee_key, sent_count
in top_lovers
]
top_loved_dicts = [
{
'employee': employee_key.get_async(),
'num_received': received_count
}
for employee_key, received_count
in top_lovees
]
# get results for the futures set up previously
    for lover_dict in top_lover_dicts:
        to_the_future(lover_dict)
    for loved_dict in top_loved_dicts:
        to_the_future(loved_dict)
return (top_lover_dicts, top_loved_dicts)
|
9ad01dc6240f53a23ab209a7d58f5ffeb86bdc64
|
002d925a46fef6867c7092935a5a4113a11cf0c5
|
/care/facility/migrations_old/0261_auto_20210710_2305.py
|
ec77872cc25a18d8379c3c390808b0c70c515a33
|
[
"MIT"
] |
permissive
|
coronasafe/care
|
ba74c06c6486e8cd3c11e0f8b3d948e99d304746
|
c000eea7f1c79a37b0fa53eba09696cd95122202
|
refs/heads/master
| 2023-08-31T12:52:08.181541
| 2023-08-29T13:43:33
| 2023-08-29T13:43:33
| 247,995,671
| 216
| 218
|
MIT
| 2023-09-13T14:52:59
| 2020-03-17T14:48:11
|
Python
|
UTF-8
|
Python
| false
| false
| 30,257
|
py
|
0261_auto_20210710_2305.py
|
# Generated by Django 2.2.11 on 2021-07-10 17:35
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import care.utils.models.validators
class Migration(migrations.Migration):
dependencies = [
("facility", "0260_auto_20210710_1742"),
]
operations = [
migrations.AddField(
model_name="dailyround",
name="base_excess",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(-20),
django.core.validators.MaxValueValidator(20),
],
),
),
migrations.AddField(
model_name="dailyround",
name="bilateral_air_entry",
field=models.BooleanField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="dailyround",
name="blood_sugar_level",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(700),
],
),
),
migrations.AddField(
model_name="dailyround",
name="bp",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=dict,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"additionalProperties": False,
"properties": {
"diastolic": {"type": "number"},
"mean": {"type": "number"},
"systolic": {"type": "number"},
},
"type": "array",
}
)
],
),
),
migrations.AddField(
model_name="dailyround",
name="consciousness_level",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "ALERT"),
(10, "DROWSY"),
(15, "STUPOROUS"),
(20, "COMATOSE"),
(25, "CANNOT_BE_ASSESSED"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="consciousness_level_detail",
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="dailyround",
name="dialysis_fluid_balance",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(700),
],
),
),
migrations.AddField(
model_name="dailyround",
name="dialysis_net_balance",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(700),
],
),
),
migrations.AddField(
model_name="dailyround",
name="etco2",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(200),
],
),
),
migrations.AddField(
model_name="dailyround",
name="feeds",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"items": [
{
"additionalProperties": False,
"properties": {
"name": {"type": "string"},
"quantity": {"type": "number"},
},
"required": ["name", "quantity"],
"type": "object",
}
],
"type": "array",
}
)
],
),
),
migrations.AddField(
model_name="dailyround",
name="glasgow_eye_open",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(4),
],
),
),
migrations.AddField(
model_name="dailyround",
name="glasgow_motor_response",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(6),
],
),
),
migrations.AddField(
model_name="dailyround",
name="glasgow_total_calculated",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(3),
django.core.validators.MaxValueValidator(15),
],
),
),
migrations.AddField(
model_name="dailyround",
name="glasgow_verbal_response",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(5),
],
),
),
migrations.AddField(
model_name="dailyround",
name="hco3",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=4,
null=True,
validators=[
django.core.validators.MinValueValidator(5),
django.core.validators.MaxValueValidator(80),
],
),
),
migrations.AddField(
model_name="dailyround",
name="infusions",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"items": [
{
"additionalProperties": False,
"properties": {
"name": {"type": "string"},
"quantity": {"type": "number"},
},
"required": ["name", "quantity"],
"type": "object",
}
],
"type": "array",
}
)
],
),
),
migrations.AddField(
model_name="dailyround",
name="insulin_intake_dose",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
),
),
migrations.AddField(
model_name="dailyround",
name="insulin_intake_frequency",
field=models.IntegerField(
choices=[(0, "UNKNOWN"), (5, "OD"), (10, "BD"), (15, "TD")], default=0
),
),
migrations.AddField(
model_name="dailyround",
name="iv_fluids",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"items": [
{
"additionalProperties": False,
"properties": {
"name": {"type": "string"},
"quantity": {"type": "number"},
},
"required": ["name", "quantity"],
"type": "object",
}
],
"type": "array",
}
)
],
),
),
migrations.AddField(
model_name="dailyround",
name="lactate",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=4,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(20),
],
),
),
migrations.AddField(
model_name="dailyround",
name="left_pupil_light_reaction",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "BRISK"),
(10, "SLUGGISH"),
(15, "FIXED"),
(20, "CANNOT_BE_ASSESSED"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="left_pupil_light_reaction_detail",
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="dailyround",
name="left_pupil_size",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(8),
],
verbose_name="Left Pupil Size",
),
),
migrations.AddField(
model_name="dailyround",
name="left_pupil_size_detail",
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="dailyround",
name="limb_response_lower_extremity_left",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "STRONG"),
(10, "MODERATE"),
(15, "WEAK"),
(20, "FLEXION"),
(25, "EXTENSION"),
(30, "NONE"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="limb_response_upper_extremity_left",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "STRONG"),
(10, "MODERATE"),
(15, "WEAK"),
(20, "FLEXION"),
(25, "EXTENSION"),
(30, "NONE"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="limb_response_upper_extremity_right",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "STRONG"),
(10, "MODERATE"),
(15, "WEAK"),
(20, "FLEXION"),
(25, "EXTENSION"),
(30, "NONE"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="nursing",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"items": [
{
"additionalProperties": False,
"properties": {
"description": {"type": "string"},
"procedure": {"type": "string"},
},
"required": ["procedure", "description"],
"type": "object",
}
],
"type": "array",
}
)
],
),
),
migrations.AddField(
model_name="dailyround",
name="output",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"items": [
{
"additionalProperties": False,
"properties": {
"name": {"type": "string"},
"quantity": {"type": "number"},
},
"required": ["name", "quantity"],
"type": "object",
}
],
"type": "array",
}
)
],
),
),
migrations.AddField(
model_name="dailyround",
name="pco2",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(10),
django.core.validators.MaxValueValidator(200),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ph",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=4,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(10),
],
),
),
migrations.AddField(
model_name="dailyround",
name="po2",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(10),
django.core.validators.MaxValueValidator(400),
],
),
),
migrations.AddField(
model_name="dailyround",
name="potassium",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=4,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(10),
],
),
),
migrations.AddField(
model_name="dailyround",
name="pressure_sore",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"items": [
{
"additionalProperties": False,
"properties": {
"region": {"type": "string"},
"scale": {
"maximum": 5,
"minimum": 1,
"type": "number",
},
},
"required": ["region", "scale"],
"type": "object",
}
],
"type": "array",
}
)
],
),
),
migrations.AddField(
model_name="dailyround",
name="pulse",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(200),
],
),
),
migrations.AddField(
model_name="dailyround",
name="resp",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(10),
django.core.validators.MaxValueValidator(50),
],
),
),
migrations.AddField(
model_name="dailyround",
name="rhythm",
field=models.IntegerField(
choices=[(0, "UNKNOWN"), (5, "REGULAR"), (10, "IRREGULAR")], default=0
),
),
migrations.AddField(
model_name="dailyround",
name="rhythm_detail",
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="dailyround",
name="right_pupil_light_reaction",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "BRISK"),
(10, "SLUGGISH"),
(15, "FIXED"),
(20, "CANNOT_BE_ASSESSED"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="right_pupil_light_reaction_detail",
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="dailyround",
name="right_pupil_size",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(8),
],
verbose_name="Right Pupil Size",
),
),
migrations.AddField(
model_name="dailyround",
name="right_pupil_size_detail",
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="dailyround",
name="rounds_type",
field=models.IntegerField(
choices=[(0, "NORMAL"), (100, "VENTILATOR"), (200, "ICU")], default=0
),
),
migrations.AddField(
model_name="dailyround",
name="sodium",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(100),
django.core.validators.MaxValueValidator(170),
],
),
),
migrations.AddField(
model_name="dailyround",
name="taken_at",
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name="dailyround",
name="total_intake_calculated",
field=models.DecimalField(
blank=True, decimal_places=2, default=None, max_digits=6, null=True
),
),
migrations.AddField(
model_name="dailyround",
name="total_output",
field=models.DecimalField(
blank=True, decimal_places=2, default=None, max_digits=6, null=True
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_fi02",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(21),
django.core.validators.MaxValueValidator(100),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_interface",
field=models.IntegerField(
choices=[(0, "UNKNOWN"), (5, "INVASIVE"), (10, "NON_INVASIVE")],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_mean_airway_pressure",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(40),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_mode",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "VCV"),
(10, "PCV"),
(15, "PRVC"),
(20, "APRV"),
(25, "VC_SIMV"),
(30, "PC_SIMV"),
(40, "PRVC_SIMV"),
(45, "ASV"),
(50, "PSV"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_oxygen_modality",
field=models.IntegerField(
choices=[
(0, "UNKNOWN"),
(5, "NASAL_PRONGS"),
(10, "SIMPLE_FACE_MASK"),
(15, "NON_REBREATHING_MASK"),
(20, "HIGH_FLOW_NASAL_CANNULA"),
],
default=0,
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_oxygen_modality_flow_rate",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(70),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_oxygen_modality_oxygen_rate",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(50),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_peep",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=4,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(30),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_pip",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_pressure_support",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(40),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_resp_rate",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_spo2",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
),
),
migrations.AddField(
model_name="dailyround",
name="ventilator_tidal_volume",
field=models.IntegerField(
default=None,
null=True,
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(1000),
],
),
),
migrations.AlterField(
model_name="dailyround",
name="temperature",
field=models.DecimalField(
blank=True,
decimal_places=2,
default=None,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(95),
django.core.validators.MaxValueValidator(106),
],
),
),
migrations.AlterField(
model_name="patientconsultation",
name="lines",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=list,
validators=[
care.utils.models.validators.JSONFieldSchemaValidator(
{
"$schema": "http://json-schema.org/draft-07/schema#",
"items": [
{
"additionalProperties": False,
"properties": {
"site": {"type": "string"},
"start_date": {
"pattern": "^(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2}(?:\\.\\d*)?)((-(\\d{2}):(\\d{2})|Z)?)$",
"type": "string",
},
"type": {"type": "string"},
},
"required": ["start_date", "type", "site"],
"type": "object",
}
],
"type": "array",
}
)
],
),
),
]
|
6425d869cbe9f93d0239f0f0dc790d5f40307a73
|
b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb
|
/modules/dbnd/test_dbnd/parameters/test_parameter_value_type_loader.py
|
bf037adf5e1634bc3ad3c2687025aad584353def
|
[
"Apache-2.0"
] |
permissive
|
databand-ai/dbnd
|
70c95d95e12bfb8ab471a6dce27691ed658cb92d
|
d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
|
refs/heads/develop
| 2023-06-24T18:07:56.524526
| 2023-05-28T07:57:36
| 2023-05-28T07:57:36
| 231,361,064
| 257
| 33
|
Apache-2.0
| 2023-08-06T08:30:28
| 2020-01-02T10:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
test_parameter_value_type_loader.py
|
# © Copyright Databand.ai, an IBM Company 2022
from dbnd import parameter
from targets.values import InlineValueType, ValueTypeLoader, register_value_type
class TypeWithValueTypeLoaderError(object):
pass
class TestParameterValueTypeLoader(object):
def test_value_type_loader(self):
register_value_type(
ValueTypeLoader(
"test_dbnd.parameters.lazy_types_examples.lazy_type_simple.LazyTypeForTest1",
"test_dbnd.parameters.lazy_types_examples.lazy_type_simple.LazyTypeForTest1_ValueType",
"dbnd-core",
)
)
from test_dbnd.parameters.lazy_types_examples.lazy_type_simple import (
LazyTypeForTest1,
LazyTypeForTest1_ValueType,
)
p_def = parameter[LazyTypeForTest1].parameter
assert isinstance(p_def.value_type, ValueTypeLoader)
# parameter after build
p = parameter[LazyTypeForTest1]._p
assert isinstance(p.value_type, LazyTypeForTest1_ValueType)
def test_value_type_loader_failed(self):
c = TypeWithValueTypeLoaderError
register_value_type(
ValueTypeLoader(
f"{c.__module__}.{c.__qualname__}",
"test_dbnd.parameters.NotExists",
"dbnd-core",
)
)
p_def = parameter[TypeWithValueTypeLoaderError].parameter
assert isinstance(p_def.value_type, ValueTypeLoader)
# parameter after build
p = parameter[TypeWithValueTypeLoaderError]._p
assert isinstance(p.value_type, InlineValueType)
|
c492c57fc60cb5811360b670e8970041c3f82686
|
eb9253b8c2a281bebddbf77fa02b249fa18f11b7
|
/configs/det/knet/knet_s3_r50_fpn_ms-3x_coco.py
|
72243317778ba437f44e664e527acaabdadf4e56
|
[
"Apache-2.0"
] |
permissive
|
ZwwWayne/K-Net
|
781ea91ebe17fb9887409ee030c0bfae8dc60b99
|
5e50ee58957dce972f51096804ff69171c2f072e
|
refs/heads/main
| 2023-05-23T18:43:16.806151
| 2021-12-16T12:43:08
| 2021-12-16T12:43:08
| 417,726,092
| 452
| 55
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
knet_s3_r50_fpn_ms-3x_coco.py
|
_base_ = [
'../_base_/models/knet_s3_r50_fpn.py',
'../common/mstrain_3x_coco_instance.py'
]
|
6e9aba7f12e216488b740550a72e43ceff727e30
|
9db247ecf5cc49927aa292f94d777565ac4b394c
|
/tests/version.py
|
c3e2351a99d7edf8041a66259063755bf86141eb
|
[
"BSD-3-Clause"
] |
permissive
|
rsmusllp/king-phisher
|
33d82b24fbf3a9b9f5a3c2db58a7c14161ca2902
|
1bbc1bf122a18085b1ea4af20c9af10cc4cf899e
|
refs/heads/master
| 2023-08-30T08:49:57.893481
| 2022-11-07T16:21:41
| 2022-11-07T16:21:41
| 15,592,447
| 1,018
| 231
|
BSD-3-Clause
| 2023-08-01T23:01:41
| 2014-01-02T20:00:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
version.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/version.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import distutils.version
import unittest
from king_phisher import testing
from king_phisher.version import *
import requests
class VersionTests(testing.KingPhisherTestCase):
def test_distutils_version(self):
try:
distutils.version.StrictVersion(distutils_version)
except ValueError:
			self.fail('the distutils version label is not compatible with StrictVersion')
def test_version_info(self):
if version_label:
self.assertRegex(version_label, r'^(alpha|beta)\d*$', 'the version label is invalid')
version_regex = r'^\d+\.\d+\.\d+(-(alpha|beta)\d*)?( \(rev: [a-f0-9]{8,}\))?$'
self.assertRegex(version, version_regex, msg='the version format is invalid')
version_regex = r'^\d+\.\d+\.\d+((a|b)\d*)?$'
self.assertRegex(distutils_version, version_regex, msg='the distutils version format is invalid')
@testing.skip_if_offline
@testing.skip_on_travis
def test_github_releases(self):
releases = requests.get('https://api.github.com/repos/securestate/king-phisher/releases').json()
releases = [release for release in releases if not release['draft']]
for release in releases:
tag_name_regex = r'v\d+\.\d+\.\d+'
tag_name = release['tag_name']
self.assertRegex(tag_name, tag_name_regex, msg='the release tag name is invalid')
name = "{0}: Version {1}".format(tag_name, tag_name[1:])
self.assertEqual(name, release['name'], msg='the release name is invalid')
if __name__ == '__main__':
unittest.main()
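To make the two version regexes above concrete, a small standalone illustration (the sample strings are invented for demonstration and are not real King Phisher release versions):

import re

version_regex = r'^\d+\.\d+\.\d+(-(alpha|beta)\d*)?( \(rev: [a-f0-9]{8,}\))?$'
distutils_regex = r'^\d+\.\d+\.\d+((a|b)\d*)?$'

assert re.match(version_regex, '1.15.0')
assert re.match(version_regex, '1.15.0-beta2 (rev: 0123abcd)')
assert re.match(distutils_regex, '1.15.0b2')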
|
7b3d778991ef6a3eaa7852414919b05b06a1b1a6
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/py/py/_io/__init__.py
|
835f01f3ab9dcb656dce1e580f0d98d7b8abfe3a
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 29
|
py
|
__init__.py
|
""" input/output helping """
|
12657f03588cc5d675e43de7ee0ca5a32c823c63
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/amqpstorm/management/queue.py
|
652fea0121004159ddb8e093786084faffb83508
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 7,242
|
py
|
queue.py
|
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.compatibility import urlparse
from amqpstorm.management.base import ManagementHandler
API_QUEUE = 'queues/%s/%s'
API_QUEUE_PURGE = 'queues/%s/%s/contents'
API_QUEUES = 'queues'
API_QUEUES_VIRTUAL_HOST = 'queues/%s'
API_QUEUE_BINDINGS = 'queues/%s/%s/bindings'
API_QUEUE_BIND = 'bindings/%s/e/%s/q/%s'
API_QUEUE_UNBIND = 'bindings/%s/e/%s/q/%s/%s'
class Queue(ManagementHandler):
def get(self, queue, virtual_host='/'):
"""Get Queue details.
:param queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(
API_QUEUE % (
virtual_host,
queue
)
)
def list(self, virtual_host='/', show_all=False):
"""List Queues.
:param str virtual_host: Virtual host name
:param bool show_all: List all Queues
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
if show_all:
return self.http_client.get(API_QUEUES)
virtual_host = quote(virtual_host, '')
return self.http_client.get(
API_QUEUES_VIRTUAL_HOST % virtual_host
)
def declare(self, queue='', virtual_host='/', passive=False, durable=False,
auto_delete=False, arguments=None):
"""Declare a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:param bool passive: Do not create
:param bool durable: Durable queue
:param bool auto_delete: Automatically delete when not in use
:param dict|None arguments: Queue key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
if passive:
return self.get(queue, virtual_host=virtual_host)
queue_payload = json.dumps(
{
'durable': durable,
'auto_delete': auto_delete,
'arguments': arguments or {},
'vhost': urlparse.unquote(virtual_host)
}
)
return self.http_client.put(
API_QUEUE % (
virtual_host,
queue
),
payload=queue_payload)
def delete(self, queue, virtual_host='/'):
"""Delete a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_QUEUE %
(
virtual_host,
queue
))
def purge(self, queue, virtual_host='/'):
"""Purge a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_QUEUE_PURGE %
(
virtual_host,
queue
))
def bindings(self, queue, virtual_host='/'):
"""Get Queue bindings.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_QUEUE_BINDINGS %
(
virtual_host,
queue
))
def bind(self, queue='', exchange='', routing_key='', virtual_host='/',
arguments=None):
"""Bind a Queue.
:param str queue: Queue name
:param str exchange: Exchange name
:param str routing_key: The routing key to use
:param str virtual_host: Virtual host name
:param dict|None arguments: Bind key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
bind_payload = json.dumps({
'destination': queue,
'destination_type': 'q',
'routing_key': routing_key,
'source': exchange,
'arguments': arguments or {},
'vhost': virtual_host
})
virtual_host = quote(virtual_host, '')
return self.http_client.post(API_QUEUE_BIND %
(
virtual_host,
exchange,
queue
),
payload=bind_payload)
def unbind(self, queue='', exchange='', routing_key='', virtual_host='/',
properties_key=None):
"""Unbind a Queue.
:param str queue: Queue name
:param str exchange: Exchange name
:param str routing_key: The routing key to use
:param str virtual_host: Virtual host name
:param str properties_key:
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
unbind_payload = json.dumps({
'destination': queue,
'destination_type': 'q',
'properties_key': properties_key or routing_key,
'source': exchange,
'vhost': virtual_host
})
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_QUEUE_UNBIND %
(
virtual_host,
exchange,
queue,
properties_key or routing_key
),
payload=unbind_payload)
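A minimal usage sketch of this handler through amqpstorm's ManagementApi (URL and credentials are placeholders, and the broker is assumed to have the management plugin enabled):

from amqpstorm.management import ManagementApi

api = ManagementApi('http://localhost:15672', 'guest', 'guest')

# Declare a durable queue, bind it to an exchange, then inspect its bindings.
api.queue.declare('my_queue', virtual_host='/', durable=True)
api.queue.bind(queue='my_queue', exchange='amq.direct', routing_key='my_key')
print(api.queue.bindings('my_queue'))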
|
1cee38fb8962f5ef55f7d1f42484e734c0caf828
|
6416b746ee71d897789eab1e450000831674dbd0
|
/tests/unit/algorithms/common/adapters/mmcv/tasks/test_exporter.py
|
6544f25be50432a915f8eec60c255f8afd43b1c3
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
test_exporter.py
|
from otx.api.usecases.tasks.interfaces.export_interface import ExportType
import mmcv
import pytest
from otx.algorithms.common.adapters.mmcv.tasks.exporter import Exporter
from tests.test_suite.e2e_test_system import e2e_pytest_unit
class TestExporter:
@pytest.fixture(autouse=True)
def setup(self, mocker):
def mock_init_logger():
pass
def mock_configure(model_cfg, model_ckpt, data_cfg, training=False, **kwargs):
return mmcv.ConfigDict()
self.exporter = Exporter()
self.exporter._init_logger = mock_init_logger
self.fake_config = mmcv.ConfigDict(work_dir="/path/work_dir", data=dict(test=dict(dataset=mocker.MagicMock())))
mocker.patch("os.listdir")
@e2e_pytest_unit
def test_run_with_error_raise(self):
return_value = self.exporter.run(self.fake_config)
assert "outputs" in return_value
assert return_value["outputs"] is None
assert "msg" in return_value
@e2e_pytest_unit
def test_run_without_deploy_cfg(self, mocker):
def mock_naive_export(output_dir, model_builder, precision, export_type, cfg, model_name="model"):
pass
self.exporter.naive_export = mock_naive_export
return_value = self.exporter.run(self.fake_config)
assert "outputs" in return_value
assert return_value["outputs"]["bin"] == "/path/work_dir/model.bin"
assert return_value["outputs"]["xml"] == "/path/work_dir/model.xml"
assert "msg" in return_value
assert return_value["msg"] == ""
@e2e_pytest_unit
def test_run_with_deploy_cfg(self, mocker):
def mock_mmdeploy_export(
output_dir, model_builder, precision, export_type, cfg, deploy_cfg, model_name="model"
):
pass
self.exporter.mmdeploy_export = mock_mmdeploy_export
return_value = self.exporter.run(self.fake_config, deploy_cfg=mmcv.ConfigDict())
assert "outputs" in return_value
assert return_value["outputs"]["bin"] == "/path/work_dir/model.bin"
assert return_value["outputs"]["xml"] == "/path/work_dir/model.xml"
assert "msg" in return_value
assert return_value["msg"] == ""
@e2e_pytest_unit
def test_mmdeploy_export(self, mocker):
from otx.algorithms.common.adapters.mmdeploy.apis import MMdeployExporter
mock_export_openvino = mocker.patch.object(MMdeployExporter, "export2backend")
Exporter.mmdeploy_export(
"",
None,
"FP16",
ExportType.OPENVINO,
dict(),
mmcv.ConfigDict(backend_config=dict(mo_options=dict(flags=[]))),
)
mock_export_openvino.assert_called_once()
|
88ddad710ae7862b775d0c75047fdc6c1954e57b
|
ffde62a49fe3dab3d72ddcd33c1d3e1f4c0d64b6
|
/powerful_benchmarker/delete_experiment.py
|
775a8b85872b614f52a7027559f7957f668888a5
|
[] |
no_license
|
KevinMusgrave/powerful-benchmarker
|
93403e1fae46cfdae2983c2e1c1a95da7b628021
|
528117d43790e1d155f581ac509aed5e505feff7
|
refs/heads/domain-adaptation
| 2023-07-16T14:05:47.124839
| 2023-06-15T02:13:08
| 2023-06-15T02:13:08
| 220,663,367
| 399
| 45
| null | 2023-04-09T23:29:51
| 2019-11-09T15:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
delete_experiment.py
|
import argparse
import glob
import os
import shutil
import sys
sys.path.insert(0, ".")
from powerful_benchmarker.utils.constants import add_default_args
from validator_tests.utils import utils
from validator_tests.utils.constants import add_exp_group_args
def delete_experiment(cfg, exp_group):
experiment_paths = sorted(glob.glob(f"{cfg.exp_folder}/{exp_group}/*"))
num_folders = 0
for p in experiment_paths:
if not os.path.isdir(p):
continue
if os.path.basename(p) == cfg.adapter:
num_folders += 1
if cfg.delete:
print("deleting", p)
shutil.rmtree(p)
print("num_folders", num_folders)
def main(cfg):
exp_groups = utils.get_exp_groups(cfg)
for e in exp_groups:
delete_experiment(cfg, e)
if __name__ == "__main__":
parser = argparse.ArgumentParser(allow_abbrev=False)
add_default_args(parser, ["exp_folder"])
add_exp_group_args(parser)
parser.add_argument("--adapter", type=str, default="")
parser.add_argument("--delete", action="store_true")
args = parser.parse_args()
main(args)
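A hedged example of how this script might be invoked (the exp-group flag is added by add_exp_group_args and its exact spelling is an assumption; folder, group, and adapter names are placeholders). Omitting --delete first gives a dry run that only counts the matching folders:

# python powerful_benchmarker/delete_experiment.py \
#     --exp_folder /path/to/experiments \
#     --exp_groups my_exp_group \
#     --adapter dann \
#     --delete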
|
f6351876ff6b9d429f627d97a98ab23d379f1138
|
80a3d98eae1d755d6914b5cbde63fd10f5cc2046
|
/autox/autox_video/mmaction2/tests/test_models/test_recognizers/test_skeletongcn.py
|
063a090214d43e390ef8a81618a583a02069bdc5
|
[
"Apache-2.0"
] |
permissive
|
4paradigm/AutoX
|
efda57b51b586209e1d58e1dab7d0797083aadc5
|
7eab9f4744329a225ff01bb5ec360c4662e1e52e
|
refs/heads/master
| 2023-05-24T00:53:37.109036
| 2023-02-14T14:21:50
| 2023-02-14T14:21:50
| 388,068,949
| 752
| 162
|
Apache-2.0
| 2022-07-12T08:28:09
| 2021-07-21T09:45:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
test_skeletongcn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_skeletongcn_cfg
def test_skeletongcn():
config = get_skeletongcn_cfg('stgcn/stgcn_80e_ntu60_xsub_keypoint.py')
with pytest.raises(TypeError):
# "pretrained" must be a str or None
config.model['backbone']['pretrained'] = ['None']
recognizer = build_recognizer(config.model)
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 300, 17, 2)
demo_inputs = generate_recognizer_demo_inputs(input_shape, 'skeleton')
skeletons = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(skeletons, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
skeleton_list = [skeleton[None, :] for skeleton in skeletons]
for one_skeleton in skeleton_list:
recognizer(one_skeleton, None, return_loss=False)
# test stgcn without edge importance weighting
config.model['backbone']['edge_importance_weighting'] = False
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 300, 17, 2)
demo_inputs = generate_recognizer_demo_inputs(input_shape, 'skeleton')
skeletons = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(skeletons, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
skeleton_list = [skeleton[None, :] for skeleton in skeletons]
for one_skeleton in skeleton_list:
recognizer(one_skeleton, None, return_loss=False)
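For readers unfamiliar with the skeleton input layout, a small sketch of how the demo tensor above could be built by hand (reading the shape as (batch, channels, frames, keypoints, persons) follows the usual ST-GCN convention and is assumed here):

import torch

# (batch, channels, num_frames, num_keypoints, num_persons)
skeletons = torch.randn(1, 3, 300, 17, 2)
gt_labels = torch.randint(0, 60, (1, 1))  # NTU RGB+D 60 has 60 action classes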
|