id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,300 | dot product | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
"""
This module is for geometry functions handle meshes in 2D space ("XY" surface) mostly.
"""
# Named indices into point/vector sequences: point[x] is the X coordinate, etc.
x, y, z = 0, 1, 2
def almost_equal(v1, v2, accuracy=1e-6):
    """
    Compare floating values
    :param v1: float
    :param v2: float
    :param accuracy: two float figures are equal if their difference is lower than this value, float
    :return: True if values are equal else False
    """
    difference = v1 - v2
    if difference < 0:
        difference = -difference
    return difference < accuracy
def is_less(v1, v2, epsilon=1e-6):
    """
    Compare floating values
    :param v1: float
    :param v2: float
    :param epsilon: two float figures are equal if their difference is lower than this value, float
    :return: True if v1 is less than v2 by more than epsilon
    """
    gap = v2 - v1
    return gap > epsilon
def is_more(v1, v2, epsilon=1e-6):
    """
    Compare floating values
    :param v1: float
    :param v2: float
    :param epsilon: two float figures are equal if their difference is lower than this value, float
    :return: True if v1 is more than v2 by more than epsilon
    """
    gap = v1 - v2
    return gap > epsilon
def cross_product(v1, v2):
    """
    Cross product of two vectors of any (equal) dimension.

    Component i is v1[i+1]*v2[i+2] - v1[i+2]*v2[i+1] with cyclic indices;
    for 3D vectors this is the usual cross product.
    :param v1: any massive
    :param v2: any massive
    :return: list
    """
    n = len(v1)
    return [
        v1[(i + 1) % n] * v2[(i + 2) % n] - v1[(i + 2) % n] * v2[(i + 1) % n]
        for i in range(n)
    ]
def METHOD_NAME(v1, v2):
    """
    Calculate dot product of two vectors
    :param v1: massive of any length
    :param v2: massive of any length
    :return: float
    """
    # zip truncates to the shorter input, matching the original loop
    return sum(co1 * co2 for co1, co2 in zip(v1, v2))
def convert_homogeneous_to_cartesian(v):
    """
    Convert from homogeneous to cartesian coordinates by dividing all
    components by the last one (the weight) and dropping it.
    :param v: massive of any length
    :return: list
    """
    weight = v[-1]
    return [component / weight for component in v[:-1]]
def is_ccw(a, b, c):
    """
    Tests whether the turn formed by A, B, and C is counterclockwise,
    via the sign of the 2D cross product of AB and AC.
    :param a: 2d point - any massive
    :param b: 2d point - any massive
    :param c: 2d point - any massive
    :return: True if turn is counterclockwise else False (collinear counts as False)
    """
    abx = b[0] - a[0]
    aby = b[1] - a[1]
    acx = c[0] - a[0]
    acy = c[1] - a[1]
    return abx * acy > aby * acx
def is_ccw_polygon(all_verts=None, most_lefts=None, accuracy=1e-6):
    """
    Determine whether a polygon's vertices are ordered counterclockwise.

    Accepts either the full vertex list, or only the leftmost vertex with its
    two neighbours (previous, leftmost, next) — exactly one of the two.
    :param all_verts: [(x, y, z) or (x, y), ...]
    :param most_lefts: [(x, y, z) or (x, y), ...] - previous, leftmost, next
    :param accuracy: tolerance used for float comparison, float
    :return: bool
    """
    if bool(all_verts) == bool(most_lefts):
        # both given or both missing - ambiguous input
        raise ValueError('The function get either all points or most left point and its two neighbours of the polygon.')
    if all_verts:
        count = len(all_verts)
        left_i = min(range(count), key=lambda i: all_verts[i][0])
        most_lefts = [all_verts[(left_i - 1) % count],
                      all_verts[left_i],
                      all_verts[(left_i + 1) % count]]
    prev_pt, left_pt, next_pt = most_lefts
    on_vertical_line = (almost_equal(prev_pt[0], left_pt[0], accuracy)
                        and almost_equal(prev_pt[0], next_pt[0], accuracy))
    if on_vertical_line:
        # corner case: the three leftmost points lie on a vertical line,
        # so orientation is decided by the direction of travel along it
        return prev_pt[1] > left_pt[1]
    return is_ccw(prev_pt, left_pt, next_pt)
def is_edges_intersect(a1, b1, a2, b2):
    """
    Returns True if line segments a1b1 and a2b2 intersect.
    A point of one edge lying on the other edge counts as an intersection.
    :param a1: first 2d point of first segment - any massive
    :param b1: second 2d point of first segment - any massive
    :param a2: first 2d point of second segment - any massive
    :param b2: second 2d point of second segment - any massive
    :return: True if edges are intersected else False
    """
    # each segment must "straddle" the line through the other one
    straddles_a = (is_ccw(a1, b1, a2) != is_ccw(a1, b1, b2)
                   or is_ccw(b1, a1, a2) != is_ccw(b1, a1, b2))
    straddles_b = (is_ccw(a2, b2, a1) != is_ccw(a2, b2, b1)
                   or is_ccw(b2, a2, a1) != is_ccw(b2, a2, b1))
    return straddles_a and straddles_b
def intersect_edges(a1, a2, b1, b2, to_project=False, accuracy=1e-5):
    """
    Find the intersection of two (infinite) lines, each determined by two points.

    Works in the XY plane via homogeneous coordinates: a line through two
    points is their cross product, and the intersection of two lines is the
    cross product of the two lines.
    :param a1: point 1 of line a - any massive
    :param a2: point 2 of line a - any massive
    :param b1: point 1 of line b - any massive
    :param b2: point 2 of line b - any massive
    :param to_project: to project intersection point back into first edge - bool
    :param accuracy: two float figures are equal if their difference is lower than this value, float
    :return: returns intersection point (list) if lines are not parallel else returns False
    """
    def project_point(e1, e2, p, accuracy):
        # Reconstruct a Z value for point `p` (known only in XY) by linear
        # interpolation along edge e1-e2, proportional to the XY distance
        # of `p` from e2.
        if almost_equal(e1[z], e2[z], accuracy):
            # flat edge - reuse the shared Z directly
            return p[x], p[y], e1[z]
        ev = [co1 - co2 for co1, co2 in zip(e1, e2)]
        pv = [cop - co2 for cop, co2 in zip(p, e2)]
        dz = ev[z]
        # lengths are measured in the XY plane only
        ev = [ev[x], ev[y], 0]
        pv = [pv[x], pv[y], 0]
        pow_len_ev = sum([co ** 2 for co in ev]) ** 0.5
        pow_len_pv = sum([co ** 2 for co in pv]) ** 0.5
        pz = e2[z] + (dz * (pow_len_pv / pow_len_ev))
        return p[x], p[y], pz
    # homogeneous coefficients of lines a and b
    cross_a = cross_product((a1[x], a1[y], 1), (a2[x], a2[y], 1))
    cross_b = cross_product((b1[x], b1[y], 1), (b2[x], b2[y], 1))
    # homogeneous intersection point of the two lines
    hom_v = cross_product(cross_a, cross_b)
    if hom_v[2] != 0:
        intersect = convert_homogeneous_to_cartesian(hom_v)
        if to_project:
            return project_point(a1, a2, intersect, accuracy)
        else:
            return intersect
    elif not any(hom_v):
        return False  # two lines are overlapping
    else:
        return False  # two lines are parallel
6,301 | test grid from radars gates to grid | """ Unit Tests for Py-ART's map/gates_to_grid.py. """
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
import pyart
# Expected rounded reflectivity along the grid's central x-row for the
# synthetic target radar used throughout these tests.
EXPECTED_CENTER_SLICE = [40, 30, 20, 10, 0, 0, 10, 20, 30, 40]
# Keyword arguments shared by most gridding calls below: a small (z, y, x)
# grid with a constant 30 m radius of influence, mapping all fields.
COMMON_MAP_TO_GRID_ARGS = {
    "grid_shape": (3, 9, 10),
    "grid_limits": ((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
    "fields": None,
    "roi_func": "constant",
    "constant_roi": 30.0,
}
def test_map_to_grid_filter():
    """Gates excluded by a GateFilter must not contribute to the grid."""
    # simulate a radar with bad gates which report huge reflectivities
    radar = pyart.testing.make_target_radar()
    radar.fields["reflectivity"]["data"][0:100, 25] = 99999.0
    # without filtering, the bad gates leak through
    gatefilter = pyart.filters.GateFilter(radar)
    grids = pyart.map.map_gates_to_grid(
        (radar,), gatefilters=(gatefilter,), **COMMON_MAP_TO_GRID_ARGS
    )
    assert grids["reflectivity"].max() > 41.0
    # with filtering, the bad gates are suppressed
    gatefilter = pyart.filters.GateFilter(radar)
    gatefilter.exclude_above("reflectivity", 41.0)
    grids = pyart.map.map_gates_to_grid(
        (radar,), gatefilters=(gatefilter,), **COMMON_MAP_TO_GRID_ARGS
    )
    assert grids["reflectivity"].max() < 41.0
def test_map_to_grid_non_tuple():
    # a bare Radar (not wrapped in a tuple) must also be accepted
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid(radar, **COMMON_MAP_TO_GRID_ARGS)
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_default():
    # tuple-of-radars form with the common keyword arguments
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid((radar,), **COMMON_MAP_TO_GRID_ARGS)
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_cressman():
    # CRESSMAN weighting should reproduce the same rounded center slice
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid(
        (radar,),
        (3, 9, 10),
        ((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
        roi_func="constant",
        constant_roi=30.0,
        weighting_function="CRESSMAN",
    )
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_constant_roi():
    # constant radius of influence, grid passed positionally
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid(
        (radar,),
        (3, 9, 10),
        ((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
        roi_func="constant",
        constant_roi=30.0,
    )
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_dist_roi():
    # distance-based ROI; with zero factors it presumably degenerates to
    # min_radius everywhere, matching the constant-ROI result
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid(
        (radar,),
        (3, 9, 10),
        ((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
        roi_func="dist",
        z_factor=0,
        xy_factor=0,
        min_radius=30.0,
    )
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_dist_beam_roi():
    # beam-dependent ROI parameters (roi_func left at its default),
    # grid passed via keyword arguments
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid(
        (radar,),
        grid_shape=(3, 9, 10),
        grid_limits=((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
        fields=["reflectivity"],
        min_radius=30,
        bsp=0.0,
        h_factor=0.0,
    )
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_default_two_radars():
    # mapping the same radar twice should yield the same center slice
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid((radar, radar), **COMMON_MAP_TO_GRID_ARGS)
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_masked_refl_field():
    # a masked gate must not change the gridded result
    radar = pyart.testing.make_target_radar()
    # mask the last gate of the first ray
    fdata = radar.fields["reflectivity"]["data"]
    fdata = np.ma.masked_invalid(fdata)
    fdata.mask = False
    fdata.mask[0, -1] = [True]
    radar.fields["reflectivity"]["data"] = fdata
    grids = pyart.map.map_gates_to_grid((radar,), **COMMON_MAP_TO_GRID_ARGS)
    center_slice = grids["reflectivity"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
def test_map_to_grid_tiny_grid():
    # gridding everything into a single cell should round to 40
    radar = pyart.testing.make_target_radar()
    grids = pyart.map.map_gates_to_grid(
        (radar,),
        grid_shape=(1, 1, 1),
        grid_limits=((-400.0, 400.0), (-900.0, 900.0), (-900, 900)),
        fields=["reflectivity"],
    )
    assert grids["reflectivity"].shape == (1, 1, 1)
    assert abs(np.round(grids["reflectivity"][0]) - 40.0) < 0.01
def METHOD_NAME():
    # grid_from_radars with the explicit 'map_gates_to_grid' algorithm should
    # produce both the mapped field and the Grid metadata (ROI field, axes)
    radar = pyart.testing.make_target_radar()
    grid = pyart.map.grid_from_radars(
        (radar,), gridding_algo="map_gates_to_grid", **COMMON_MAP_TO_GRID_ARGS
    )
    # check field data
    center_slice = grid.fields["reflectivity"]["data"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
    # check other Grid object attributes
    assert "ROI" in grid.fields
    assert np.all(grid.fields["ROI"]["data"] == 30.0)
    assert_almost_equal(grid.x["data"], np.linspace(-900, 900, 10))
    assert_almost_equal(grid.y["data"], np.linspace(-900, 900, 9))
    assert_almost_equal(grid.z["data"], np.linspace(-400, 400, 3))
def test_map_to_grid_errors():
    """Invalid arguments or missing fields must raise ValueError."""
    radar = pyart.testing.make_target_radar()
    # invalid weighting_function
    pytest.raises(
        ValueError,
        pyart.map.map_gates_to_grid,
        (radar,),
        (1, 1, 1),
        ((-1, 1), (-1, 1), (-1, 1)),
        weighting_function="foo",
    )
    # invalid roi_func
    pytest.raises(
        ValueError,
        pyart.map.map_gates_to_grid,
        (radar,),
        (1, 1, 1),
        ((-1, 1), (-1, 1), (-1, 1)),
        roi_func="foo",
    )
    # missing reflectivity field
    radar.fields.pop("reflectivity")
    pytest.raises(
        ValueError,
        pyart.map.map_gates_to_grid,
        (radar,),
        (1, 1, 1),
        ((-1, 1), (-1, 1), (-1, 1)),
    )
def test_grid_from_radars():
    # default algorithm path of grid_from_radars
    radar = pyart.testing.make_target_radar()
    grid = pyart.map.grid_from_radars((radar,), **COMMON_MAP_TO_GRID_ARGS)
    # check field data
    center_slice = grid.fields["reflectivity"]["data"][1, 4, :]
    assert_almost_equal(np.round(center_slice), EXPECTED_CENTER_SLICE)
    # check other Grid object attributes
    assert "ROI" in grid.fields
    assert np.all(grid.fields["ROI"]["data"] == 30.0)
    assert_almost_equal(grid.x["data"], np.linspace(-900, 900, 10))
    assert_almost_equal(grid.y["data"], np.linspace(-900, 900, 9))
    assert_almost_equal(grid.z["data"], np.linspace(-400, 400, 3))
def test_grid_from_radars_grid_origin():
    # an explicit grid_origin must be honored, even without instrument_name
    radar = pyart.testing.make_target_radar()
    radar.metadata.pop("instrument_name")
    grid = pyart.map.grid_from_radars(
        (radar,), grid_origin=(36.4, -97.6), **COMMON_MAP_TO_GRID_ARGS
    )
    assert_almost_equal(grid.origin_latitude["data"][0], 36.4, 1)
    assert_almost_equal(grid.origin_longitude["data"][0], -97.6, 1)
def test_example_roi_funcs():
assert pyart.map.example_roi_func_constant(0, 0, 0) == 500.0
assert pyart.map.example_roi_func_dist(0, 0, 0) == 500.0
assert pyart.map.example_roi_func_dist_beam(0, 0, 0) == 500.0 |
6,302 | test text to cell py2 | import pytest
from nbformat.v4.nbbase import new_markdown_cell
from jupytext.cell_reader import (
LightScriptCellReader,
RMarkdownCellReader,
paragraph_is_fully_commented,
uncomment,
)
from jupytext.cell_to_text import RMarkdownCellExporter
@pytest.mark.parametrize(
    "lines",
    [
        "# text",
        """# # %%R
# # comment
# 1 + 1
# 2 + 2
""",
    ],
)
def test_paragraph_is_fully_commented(lines):
    # both a single commented line and a fully commented-out paragraph count
    assert paragraph_is_fully_commented(
        lines.splitlines(), comment="#", main_language="python"
    )
def test_paragraph_is_not_fully_commented(lines="# text\nnot fully commented out"):
    # a single uncommented line makes the whole paragraph "not commented"
    assert not paragraph_is_fully_commented(
        lines.splitlines(), comment="#", main_language="python"
    )
def test_uncomment():
    # with a comment marker, the marker (and one optional space) is stripped
    assert uncomment(["# line one", "#line two", "line three"], "#") == [
        "line one",
        "line two",
        "line three",
    ]
    # with an empty marker, the lines come back unchanged
    assert uncomment(["# line one", "#line two", "line three"], "") == [
        "# line one",
        "#line two",
        "line three",
    ]
def test_text_to_code_cell():
    # a ```{python} chunk becomes a code cell with the language recorded
    text = """```{python}
1+2+3
```
"""
    lines = text.splitlines()
    cell, pos = RMarkdownCellReader().read(lines)
    assert cell.cell_type == "code"
    assert cell.source == "1+2+3"
    assert cell.metadata == {"language": "python"}
    assert lines[pos:] == []
def test_text_to_code_cell_empty_code():
    # an empty chunk still yields a code cell, with empty source
    text = """```{python}
```
"""
    lines = text.splitlines()
    cell, pos = RMarkdownCellReader().read(lines)
    assert cell.cell_type == "code"
    assert cell.source == ""
    assert cell.metadata == {"language": "python"}
    assert lines[pos:] == []
def test_text_to_code_cell_empty_code_no_blank_line():
    # NOTE(review): this text looks identical to the previous test's; the
    # distinguishing trailing blank line may have been lost - confirm.
    text = """```{python}
```
"""
    lines = text.splitlines()
    cell, pos = RMarkdownCellReader().read(lines)
    assert cell.cell_type == "code"
    assert cell.source == ""
    assert cell.metadata == {"language": "python"}
    assert lines[pos:] == []
def test_text_to_markdown_cell():
    # text before a chunk becomes a markdown cell; the blank separator line
    # is consumed (pos == 3) and no lines_to_next_cell metadata is recorded
    text = """This is
a markdown cell

```{python}
1+2+3
```
"""
    lines = text.splitlines()
    cell, pos = RMarkdownCellReader().read(lines)
    assert cell.cell_type == "markdown"
    assert cell.source == "This is\na markdown cell"
    assert cell.metadata == {}
    assert pos == 3
def test_text_to_markdown_no_blank_line():
    # without a blank separator line, lines_to_next_cell == 0 is recorded
    text = """This is
a markdown cell
```{python}
1+2+3
```
"""
    lines = text.splitlines()
    cell, pos = RMarkdownCellReader().read(lines)
    assert cell.cell_type == "markdown"
    assert cell.source == "This is\na markdown cell"
    assert cell.metadata == {"lines_to_next_cell": 0}
    assert pos == 2
def test_text_to_markdown_two_blank_line():
    # two leading blank lines produce an empty markdown cell (pos == 2)
    text = """

```{python}
1+2+3
```
"""
    lines = text.splitlines()
    cell, pos = RMarkdownCellReader().read(lines)
    assert cell.cell_type == "markdown"
    assert cell.source == ""
    assert cell.metadata == {}
    assert pos == 2
def test_text_to_markdown_one_blank_line():
    # one leading blank line: empty markdown cell with lines_to_next_cell 0
    text = """
```{python}
1+2+3
```
"""
    lines = text.splitlines()
    cell, pos = RMarkdownCellReader().read(lines)
    assert cell.cell_type == "markdown"
    assert cell.source == ""
    assert cell.metadata == {"lines_to_next_cell": 0}
    assert pos == 1
def test_empty_markdown_to_text():
    # an empty markdown cell exports to a single empty line
    cell = new_markdown_cell(source="")
    text = RMarkdownCellExporter(cell, "python").cell_to_text()
    assert text == [""]
def test_text_to_cell_py():
    # a single statement becomes a one-line code cell
    text = "1+1\n"
    lines = text.splitlines()
    cell, pos = LightScriptCellReader().read(lines)
    assert cell.cell_type == "code"
    assert cell.source == "1+1"
    assert cell.metadata == {}
    assert pos == 1
def METHOD_NAME():
    # a multi-line definition is read as one code cell spanning both lines
    text = """def f(x):
    return x+1"""
    lines = text.splitlines()
    cell, pos = LightScriptCellReader().read(lines)
    assert cell.cell_type == "code"
    assert cell.source == """def f(x):\n    return x+1"""
    assert cell.metadata == {}
    assert pos == 2
def test_code_to_cell():
    # NOTE(review): duplicates METHOD_NAME above - consider removing one.
    text = """def f(x):
    return x+1"""
    lines = text.splitlines()
    cell, pos = LightScriptCellReader().read(lines)
    assert cell.cell_type == "code"
    assert cell.source == """def f(x):\n    return x+1"""
    assert cell.metadata == {}
    assert pos == 2
def test_uncomment_ocaml():
assert uncomment(["(* ## *)"], "(*", "*)") == ["##"]
assert uncomment(["(*##*)"], "(*", "*)") == ["##"] |
6,303 | get cpuinfo item | import sys, platform, re, pytest
from numpy.core._multiarray_umath import __cpu_features__
def assert_features_equal(actual, desired, fname):
    """
    Assert that NumPy's detection of CPU feature `fname` matches the value
    reported by the platform, attaching extended debug info on mismatch.

    :param actual: value detected by NumPy (compared as str)
    :param desired: value reported by the platform (compared as str)
    :param fname: feature name used in the failure message
    """
    __tracebackhide__ = True # Hide traceback for py.test
    actual, desired = str(actual), str(desired)
    if actual == desired:
        return
    # mismatch: gather as much platform context as possible before failing;
    # every probe is best-effort and falls back to the error text
    detected = str(__cpu_features__).replace("'", "")
    try:
        with open("/proc/cpuinfo", "r") as fd:
            cpuinfo = fd.read(2048)
    except Exception as err:
        cpuinfo = str(err)
    try:
        import subprocess
        # dump the ELF auxiliary vector via the dynamic loader
        auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
        auxv = auxv.decode()
    except Exception as err:
        auxv = str(err)
    import textwrap
    # NOTE(review): prefix='\r' prepends a carriage return to every report
    # line - presumably intentional for console rendering; confirm.
    error_report = textwrap.indent(
"""
###########################################
### Extra debugging information
###########################################
-------------------------------------------
--- NumPy Detections
-------------------------------------------
%s
-------------------------------------------
--- SYS / CPUINFO
-------------------------------------------
%s....
-------------------------------------------
--- SYS / AUXV
-------------------------------------------
%s
""" % (detected, cpuinfo, auxv), prefix='\r')
    raise AssertionError((
        "Failure Detection\n"
        " NAME: '%s'\n"
        " ACTUAL: %s\n"
        " DESIRED: %s\n"
        "%s"
    ) % (fname, actual, desired, error_report))
class AbstractTest:
    """
    Base class for the per-architecture CPU feature tests.

    Subclasses fill in the class attributes and implement ``load_flags`` to
    populate ``features_flags`` from the platform (``/proc/cpuinfo`` or the
    ELF auxiliary vector); ``test_features`` then compares those flags with
    NumPy's ``__cpu_features__`` detection.
    """
    # feature names as NumPy reports them
    features = []
    # NumPy group name -> list of member features
    features_groups = {}
    # NumPy feature name -> platform flag name(s) (a str or a sequence)
    features_map = {}
    # raw flags collected from the platform
    features_flags = set()
    def load_flags(self):
        # a hook
        pass
    def test_features(self):
        self.load_flags()
        # a feature group is expected iff all of its member features are
        for gname, features in self.features_groups.items():
            test_features = [self.cpu_have(f) for f in features]
            assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
        for feature_name in self.features:
            cpu_have = self.cpu_have(feature_name)
            npy_have = __cpu_features__.get(feature_name)
            assert_features_equal(npy_have, cpu_have, feature_name)
    def cpu_have(self, feature_name):
        # True when the platform reports the feature under any of its aliases
        map_names = self.features_map.get(feature_name, feature_name)
        if isinstance(map_names, str):
            return map_names in self.features_flags
        for f in map_names:
            if f in self.features_flags:
                return True
        return False
    def load_flags_cpuinfo(self, magic_key):
        self.features_flags = self.METHOD_NAME(magic_key)
    def METHOD_NAME(self, magic_key):
        # collect the upper-cased, space-separated values of every
        # /proc/cpuinfo line that starts with `magic_key`
        values = set()
        with open('/proc/cpuinfo') as fd:
            for line in fd:
                if not line.startswith(magic_key):
                    continue
                flags_value = [s.strip() for s in line.split(':', 1)]
                if len(flags_value) == 2:
                    values = values.union(flags_value[1].upper().split())
        return values
    def load_flags_auxv(self):
        # read AT_HWCAP* entries from the ELF auxiliary vector by running
        # /bin/true with LD_SHOW_AUXV=1
        import subprocess
        auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
        for at in auxv.split(b'\n'):
            if not at.startswith(b"AT_HWCAP"):
                continue
            hwcap_value = [s.strip() for s in at.split(b':', 1)]
            if len(hwcap_value) == 2:
                self.features_flags = self.features_flags.union(
                    hwcap_value[1].upper().decode().split()
                )
# platform predicates used by the skipif conditions on the test classes below
is_linux = sys.platform.startswith('linux')
is_cygwin = sys.platform.startswith('cygwin')
machine = platform.machine()
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
@pytest.mark.skipif(
    not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
)
class Test_X86_Features(AbstractTest):
    """Compare NumPy's x86 feature detection against /proc/cpuinfo flags."""
    features = [
        "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
        "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
        "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
        "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
        "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG",
    ]
    # AVX512 feature groups named after the microarchitectures (KNL, SKX, ...)
    features_groups = dict(
        AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
        AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
                      "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
        AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
        AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
        AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                      "AVX512VBMI"],
        AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                      "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
    )
    # NumPy feature name -> /proc/cpuinfo flag name, where they differ
    features_map = dict(
        SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
        AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
        AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
    )
    def load_flags(self):
        # x86 kernels expose the feature list on "flags" lines
        self.load_flags_cpuinfo("flags")
# POWER feature detection reads the ELF auxiliary vector, not /proc/cpuinfo
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
class Test_POWER_Features(AbstractTest):
    features = ["VSX", "VSX2", "VSX3", "VSX4"]
    # NumPy VSX levels map to architecture revision flags in AT_HWCAP
    features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
    def load_flags(self):
        self.load_flags_auxv()
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
                    reason="Only for Linux and IBM Z")
class Test_ZARCH_Features(AbstractTest):
    # vector facility and its extensions, read from the auxiliary vector
    features = ["VX", "VXE", "VXE2"]
    def load_flags(self):
        self.load_flags_auxv()
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
features = [
"NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
]
features_groups = dict(
NEON_FP16 = ["NEON", "HALF"],
NEON_VFPV4 = ["NEON", "VFPV4"],
)
def load_flags(self):
self.load_flags_cpuinfo("Features")
arch = self.METHOD_NAME("CPU architecture")
# in case of mounting virtual filesystem of aarch64 kernel
is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
self.features_map = dict(
NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
)
else:
self.features_map = dict(
# ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
# doesn't provide information about ASIMD, so we assume that ASIMD is supported
# if the kernel reports any one of the following ARM8 features.
ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
) |
6,304 | test create bom | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from pathlib import Path
from string import Template
from unittest.mock import patch
import mozpack.pkg
import mozunit
from mozpack.pkg import (
create_bom,
create_payload,
create_pkg,
get_app_info_plist,
get_apple_template,
get_relative_glob_list,
save_text_file,
xar_package_folder,
)
from mozpack.test.test_files import TestWithTmpDir
class TestPkg(TestWithTmpDir):
    """Unit tests for the macOS .pkg packaging helpers in mozpack.pkg."""
    maxDiff = None
    class MockSubprocessRun:
        # minimal stand-in for subprocess.CompletedProcess
        stderr = ""
        stdout = ""
        returncode = 0
        def __init__(self, returncode=0):
            self.returncode = returncode
    def _mk_test_file(self, name, mode=0o777):
        # create an empty file with the given permissions inside the temp dir
        tool = Path(self.tmpdir) / f"{name}"
        tool.touch()
        tool.chmod(mode)
        return tool
    def test_get_apple_template(self):
        tmpl = get_apple_template("Distribution.template")
        # isinstance instead of `type(...) ==` (accepts Template subclasses)
        assert isinstance(tmpl, Template)
    def test_get_apple_template_not_file(self):
        with self.assertRaises(Exception):
            get_apple_template("tmpl-should-not-exist")
    def test_save_text_file(self):
        # round-trip: saved content must be read back unchanged
        content = "Hello"
        destination = Path(self.tmpdir) / "test_save_text_file"
        save_text_file(content, destination)
        with destination.open("r") as file:
            assert content == file.read()
    def test_get_app_info_plist(self):
        # plistlib.load is patched so only the path handling is exercised
        app_path = Path(self.tmpdir) / "app"
        (app_path / "Contents").mkdir(parents=True)
        (app_path / "Contents/Info.plist").touch()
        data = {"foo": "bar"}
        with patch.object(mozpack.pkg.plistlib, "load", lambda x: data):
            assert data == get_app_info_plist(app_path)
    def test_get_app_info_plist_not_file(self):
        app_path = Path(self.tmpdir) / "app-does-not-exist"
        with self.assertRaises(Exception):
            get_app_info_plist(app_path)
    def _mock_payload(self, returncode):
        # build a subprocess.run replacement returning the given exit code
        def _mock_run(*args, **kwargs):
            return self.MockSubprocessRun(returncode)
        return _mock_run
    def test_create_payload(self):
        destination = Path(self.tmpdir) / "mockPayload"
        with patch.object(mozpack.pkg.subprocess, "run", self._mock_payload(0)):
            create_payload(destination, Path(self.tmpdir), "cpio")
    def METHOD_NAME(self):
        bom_path = Path(self.tmpdir) / "Bom"
        bom_path.touch()
        root_path = Path(self.tmpdir)
        tool_path = Path(self.tmpdir) / "not-really-used-during-test"
        with patch.object(mozpack.pkg.subprocess, "check_call", lambda *x: None):
            create_bom(bom_path, root_path, tool_path)
    def test_get_relative_glob_list(self):
        # renamed from get_relative_glob_list: without the `test_` prefix
        # this test was never collected by pytest/unittest and never ran
        source = Path(self.tmpdir)
        (source / "testfile").touch()
        glob = "*"
        assert len(get_relative_glob_list(source, glob)) == 1
    def test_xar_package_folder(self):
        source = Path(self.tmpdir)
        dest = source / "fakedestination"
        dest.touch()
        tool = source / "faketool"
        with patch.object(mozpack.pkg.subprocess, "check_call", lambda *x, **y: None):
            xar_package_folder(source, dest, tool)
    def test_xar_package_folder_not_absolute(self):
        # relative paths must be rejected before any subprocess call
        source = Path("./some/relative/path")
        dest = Path("./some/other/relative/path")
        tool = source / "faketool"
        with patch.object(mozpack.pkg.subprocess, "check_call", lambda: None):
            with self.assertRaises(Exception):
                xar_package_folder(source, dest, tool)
    def test_create_pkg(self):
        # every helper is patched out: only create_pkg's orchestration runs
        def noop(*x, **y):
            pass
        def mock_get_app_info_plist(*args):
            return {"CFBundleShortVersionString": "1.0.0"}
        def mock_get_apple_template(*args):
            return Template("fake template")
        source = Path(self.tmpdir) / "FakeApp.app"
        source.mkdir()
        output = Path(self.tmpdir) / "output.pkg"
        fake_tool = Path(self.tmpdir) / "faketool"
        with patch.multiple(
            mozpack.pkg,
            get_app_info_plist=mock_get_app_info_plist,
            get_apple_template=mock_get_apple_template,
            save_text_file=noop,
            create_payload=noop,
            create_bom=noop,
            xar_package_folder=noop,
        ):
            create_pkg(source, output, fake_tool, fake_tool, fake_tool)
if __name__ == "__main__":
mozunit.main() |
6,305 | sample pose func | import blenderproc as bproc
import argparse
import os
import numpy as np
# Command line interface: BOP dataset location, cc-textures and output folder.
parser = argparse.ArgumentParser()
parser.add_argument('bop_parent_path', help="Path to the bop datasets parent directory")
# NOTE(review): 'cc_textures_path' is positional, so its `default` is never
# applied unless nargs='?' is set - confirm whether it should be optional.
parser.add_argument('cc_textures_path', default="resources/cctextures", help="Path to downloaded cc textures")
parser.add_argument('output_dir', help="Path to where the final files will be saved ")
parser.add_argument('--num_scenes', type=int, default=2000, help="How many scenes with 25 images each to generate")
args = parser.parse_args()
bproc.init()
# load target bop objects (hb dataset) into the scene
target_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'), mm2m = True)
# load distractor bop objects from three other datasets
tless_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tless'), model_type = 'cad', mm2m = True)
ycbv_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'ycbv'), mm2m = True)
tyol_dist_bop_objs = bproc.loader.load_bop_objs(bop_dataset_path = os.path.join(args.bop_parent_path, 'tyol'), mm2m = True)
# load BOP dataset camera intrinsics
bproc.loader.load_bop_intrinsics(bop_dataset_path = os.path.join(args.bop_parent_path, 'hb'))
# set shading and hide all objects until they are sampled into a scene
for obj in (target_bop_objs + tless_dist_bop_objs + ycbv_dist_bop_objs + tyol_dist_bop_objs):
    obj.set_shading_mode('auto')
    obj.hide(True)
# create room: a floor plane plus four tilted wall planes
room_planes = [bproc.object.create_primitive('PLANE', scale=[2, 2, 1]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, -2, 2], rotation=[-1.570796, 0, 0]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[0, 2, 2], rotation=[1.570796, 0, 0]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[2, 0, 2], rotation=[0, -1.570796, 0]),
               bproc.object.create_primitive('PLANE', scale=[2, 2, 1], location=[-2, 0, 2], rotation=[0, 1.570796, 0])]
# make the room passive (non-moving) collision geometry
for plane in room_planes:
    plane.enable_rigidbody(False, collision_shape='BOX', mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
# ceiling plane whose emissive material lights the scene; color and
# strength are sampled per scene inside the main loop
light_plane = bproc.object.create_primitive('PLANE', scale=[3, 3, 1], location=[0, 0, 10])
light_plane.set_name('light_plane')
light_plane_material = bproc.material.create('light_material')
# point light whose position is sampled on a shell per scene
light_point = bproc.types.Light()
light_point.set_energy(200)
# load cc_textures used to texture the room planes
cc_textures = bproc.loader.load_ccmaterials(args.cc_textures_path)
# Sampler for random 6-DoF object poses inside the room volume
def METHOD_NAME(obj: bproc.types.MeshObject):
    """Place `obj` at a random location with a uniformly random rotation."""
    # jitter the sampling box itself, then draw the location from it
    lower = np.random.uniform([-0.3, -0.3, 0.0], [-0.2, -0.2, 0.0])
    upper = np.random.uniform([0.2, 0.2, 0.4], [0.3, 0.3, 0.6])
    obj.set_location(np.random.uniform(lower, upper))
    obj.set_rotation_euler(bproc.sampler.uniformSO3())
# activate depth rendering without antialiasing
bproc.renderer.enable_depth_output(activate_antialiasing=False)
# set the number of samples used for color rendering
bproc.renderer.set_max_amount_of_samples(50)
for i in range(args.num_scenes):
# Sample bop objects for a scene
sampled_target_bop_objs = list(np.random.choice(target_bop_objs, size=20, replace=False))
sampled_distractor_bop_objs = list(np.random.choice(tless_dist_bop_objs, size=2, replace=False))
sampled_distractor_bop_objs += list(np.random.choice(ycbv_dist_bop_objs, size=2, replace=False))
sampled_distractor_bop_objs += list(np.random.choice(tyol_dist_bop_objs, size=2, replace=False))
# Randomize materials and set physics
for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
mat = obj.get_materials()[0]
if obj.get_cp("bop_dataset_name") in ['itodd', 'tless']:
grey_col = np.random.uniform(0.1, 0.9)
mat.set_principled_shader_value("Base Color", [grey_col, grey_col, grey_col, 1])
mat.set_principled_shader_value("Roughness", np.random.uniform(0, 1.0))
mat.set_principled_shader_value("Specular", np.random.uniform(0, 1.0))
obj.enable_rigidbody(True, mass=1.0, friction = 100.0, linear_damping = 0.99, angular_damping = 0.99)
obj.hide(False)
# Sample two light sources
light_plane_material.make_emissive(emission_strength=np.random.uniform(3,6),
emission_color=np.random.uniform([0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 1.0]))
light_plane.replace_materials(light_plane_material)
light_point.set_color(np.random.uniform([0.5,0.5,0.5],[1,1,1]))
location = bproc.sampler.shell(center = [0, 0, 0], radius_min = 1, radius_max = 1.5,
elevation_min = 5, elevation_max = 89)
light_point.set_location(location)
# sample CC Texture and assign to room planes
random_cc_texture = np.random.choice(cc_textures)
for plane in room_planes:
plane.replace_materials(random_cc_texture)
# Sample object poses and check collisions
bproc.object.sample_poses(objects_to_sample = sampled_target_bop_objs + sampled_distractor_bop_objs,
METHOD_NAME = METHOD_NAME,
max_tries = 1000)
# Physics Positioning
bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=3,
max_simulation_time=10,
check_object_interval=1,
substeps_per_frame = 20,
solver_iters=25)
# BVH tree used for camera obstacle checks
bop_bvh_tree = bproc.object.create_bvh_tree_multi_objects(sampled_target_bop_objs + sampled_distractor_bop_objs)
cam_poses = 0
while cam_poses < 25:
# Sample location
location = bproc.sampler.shell(center = [0, 0, 0],
radius_min = 0.44,
radius_max = 1.42,
elevation_min = 5,
elevation_max = 89)
# Determine point of interest in scene as the object closest to the mean of a subset of objects
poi = bproc.object.compute_poi(np.random.choice(sampled_target_bop_objs, size=15, replace=False))
# Compute rotation based on vector going from location towards poi
rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-3.14159, 3.14159))
# Add homog cam pose based on location an rotation
cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
# Check that obstacles are at least 0.3 meter away from the camera and make sure the view interesting enough
if bproc.camera.perform_obstacle_in_view_check(cam2world_matrix, {"min": 0.3}, bop_bvh_tree):
# Persist camera pose
bproc.camera.add_camera_pose(cam2world_matrix, frame=cam_poses)
cam_poses += 1
# render the whole pipeline
data = bproc.renderer.render()
# Write data in bop format
bproc.writer.write_bop(os.path.join(args.output_dir, 'bop_data'),
target_objects = sampled_target_bop_objs,
dataset = 'hb',
depth_scale = 0.1,
depths = data["depth"],
colors = data["colors"],
color_file_format = "JPEG",
ignore_dist_thres = 10)
for obj in (sampled_target_bop_objs + sampled_distractor_bop_objs):
obj.disable_rigidbody()
obj.hide(True) |
6,306 | process raw response | from __future__ import annotations
from datetime import datetime
from typing import Any, List, Optional
from rest_framework.exceptions import ParseError
from rest_framework.request import Request
from rest_framework.response import Response
from snuba_sdk import (
Column,
Condition,
Direction,
Entity,
Function,
Granularity,
Limit,
Offset,
Op,
OrderBy,
Query,
)
from snuba_sdk import Request as SnubaRequest
from sentry import features
from sentry.api.api_owners import ApiOwner
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.organization import NoProjects, OrganizationEndpoint
from sentry.api.event_search import SearchConfig
from sentry.api.paginator import GenericOffsetPaginator
from sentry.models.organization import Organization
from sentry.replays.lib.query import Number, QueryConfig, get_valid_sort_commands
from sentry.replays.query import Paginators, make_pagination_values
from sentry.replays.validators import ReplaySelectorValidator
from sentry.utils.snuba import raw_snql_query
@region_silo_endpoint
class OrganizationReplaySelectorIndexEndpoint(OrganizationEndpoint):
    """Paginated list of replay click selectors aggregated for an organization."""

    publish_status = {
        "GET": ApiPublishStatus.UNKNOWN,
    }
    owner = ApiOwner.REPLAY

    def get_replay_filter_params(self, request, organization):
        """Resolve the standard filter params, rejecting multi-project queries
        unless the organization has the global-views feature."""
        filter_params = self.get_filter_params(request, organization)

        has_global_views = features.has(
            "organizations:global-views", organization, actor=request.user
        )
        if not has_global_views and len(filter_params.get("project_id", [])) > 1:
            raise ParseError(detail="You cannot view events from multiple projects.")

        return filter_params

    def get(self, request: Request, organization: Organization) -> Response:
        """Return aggregated selector rows; 404 when session-replay is disabled."""
        if not features.has("organizations:session-replay", organization, actor=request.user):
            return Response(status=404)
        try:
            filter_params = self.get_replay_filter_params(request, organization)
        except NoProjects:
            # No projects to query — empty result rather than an error.
            return Response({"data": []}, status=200)

        result = ReplaySelectorValidator(data=request.GET)
        if not result.is_valid():
            raise ParseError(result.errors)

        # Validated query-string values only fill in params not already derived above.
        for key, value in result.validated_data.items():
            if key not in filter_params:
                filter_params[key] = value

        def data_fn(offset, limit):
            # Paginator callback: fetch one page of aggregated selector rows.
            return query_selector_collection(
                project_ids=filter_params["project_id"],
                start=filter_params["start"],
                end=filter_params["end"],
                sort=filter_params.get("sort"),
                limit=limit,
                offset=offset,
                organization=organization,
            )

        return self.paginate(
            request=request,
            paginator=GenericOffsetPaginator(data_fn=data_fn),
            on_results=lambda results: {"data": METHOD_NAME(results)},
        )
# Search configuration: only the click-count fields may be used as numeric
# search keys against this endpoint.
selector_search_config = SearchConfig(numeric_keys={"count_dead_clicks", "count_rage_clicks"})


class SelectorQueryConfig(QueryConfig):
    """Declares the numeric fields accepted as sort keys by
    ``get_valid_sort_commands``."""

    count_dead_clicks = Number()
    count_rage_clicks = Number()
def query_selector_collection(
    project_ids: List[int],
    start: datetime,
    end: datetime,
    sort: Optional[str],
    limit: Optional[str],
    offset: Optional[str],
    organization: Organization,
) -> list:
    """Query aggregated replay collection.

    Returns only the list of selector rows (the ``"data"`` key of the Snuba
    response), not the full response payload.
    """
    # Tenant ids scope the Snuba query for multi-tenant accounting.
    if organization:
        tenant_ids = {"organization_id": organization.id}
    else:
        tenant_ids = {}

    # limit/offset arrive as raw strings and are validated/coerced here.
    paginators = make_pagination_values(limit, offset)

    response = query_selector_dataset(
        project_ids=project_ids,
        start=start,
        end=end,
        pagination=paginators,
        sort=sort,
        tenant_ids=tenant_ids,
    )
    return response["data"]
def query_selector_dataset(
    project_ids: List[int],
    start: datetime,
    end: datetime,
    pagination: Optional[Paginators],
    sort: Optional[str],
    tenant_ids: dict[str, Any] | None = None,
):
    """Build and execute the Snuba query that aggregates click selectors.

    Rows are grouped by the individual selector components and aggregated
    into dead/rage click counts over the given project and time window.
    """
    extra_options = {}
    if pagination:
        extra_options["limit"] = Limit(pagination.limit)
        extra_options["offset"] = Offset(pagination.offset)

    order_by = get_valid_sort_commands(
        sort,
        default=OrderBy(Column("count_dead_clicks"), Direction.DESC),
        query_config=SelectorQueryConfig(),
    )

    # The selector components appear both in the projection and the grouping.
    selector_columns = [
        Column("click_tag"),
        Column("click_id"),
        Column("click_class"),
        Column("click_role"),
        Column("click_alt"),
        Column("click_testid"),
        Column("click_aria_label"),
        Column("click_title"),
    ]

    snuba_request = SnubaRequest(
        dataset="replays",
        app_id="replay-backend-web",
        query=Query(
            match=Entity("replays"),
            select=selector_columns
            + [
                Function("sum", parameters=[Column("click_is_dead")], alias="count_dead_clicks"),
                Function("sum", parameters=[Column("click_is_rage")], alias="count_rage_clicks"),
            ],
            where=[
                Condition(Column("project_id"), Op.IN, project_ids),
                Condition(Column("timestamp"), Op.LT, end),
                Condition(Column("timestamp"), Op.GTE, start),
                # Rows without a tag are not real click selectors.
                Condition(Column("click_tag"), Op.NEQ, ""),
            ],
            orderby=order_by,
            groupby=selector_columns,
            granularity=Granularity(3600),
            **extra_options,
        ),
        tenant_ids=tenant_ids,
    )
    return raw_snql_query(snuba_request, "replays.query.query_replays_dataset")
def METHOD_NAME(response: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Process the response further into the expected output.

    Each aggregated row is turned into ``{"dom_element": <css selector>,
    "count_dead_clicks": ..., "count_rage_clicks": ...}``.
    """

    def _css_selector(row: dict[str, Any]) -> str:
        """Assemble a CSS-like selector string from the row's components."""
        parts = [row["click_tag"]]
        if row["click_id"]:
            parts.append(f"#{row['click_id']}")
        if row["click_class"]:
            parts.append("." + ".".join(row["click_class"]))
        # Attribute selectors, in fixed order; empty values are skipped.
        for column, attribute in (
            ("click_role", "role"),
            ("click_alt", "alt"),
            ("click_testid", "testid"),
            ("click_aria_label", "aria"),
            ("click_title", "title"),
        ):
            if row[column]:
                parts.append(f'[{attribute}="{row[column]}"]')
        return "".join(parts)

    output = []
    for row in response:
        output.append(
            {
                "dom_element": _css_selector(row),
                "count_dead_clicks": row["count_dead_clicks"],
                "count_rage_clicks": row["count_rage_clicks"],
            }
        )
    return output
6,307 | send message | import uuid
import time
import os
import json
import logging
import socket
import sys
import platform
from parsl.utils import setproctitle
from parsl.multiprocessing import ForkProcess
from parsl.dataflow.states import States
from parsl.version import VERSION as PARSL_VERSION
logger = logging.getLogger(__name__)
def async_process(fn):
    """Decorator that launches ``fn`` in a separate (forked) process.

    The wrapped callable starts the process and immediately returns the
    ``ForkProcess`` handle (not ``fn``'s result) so the caller can later
    join or terminate it.
    """
    import functools  # local import: keeps the module's import block untouched

    @functools.wraps(fn)  # preserve fn's name/docstring on the wrapper
    def run(*args, **kwargs):
        proc = ForkProcess(target=fn, args=args, kwargs=kwargs, name="Usage-Tracking")
        proc.start()
        return proc

    return run
@async_process
def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
    """Send a single UDP message to the usage tracker asynchronously.

    This multiprocessing based messenger was written to overcome the limitations
    of signalling/terminating a thread that is blocked on a system call. Because
    of the ``@async_process`` decorator, calling this function forks a child
    process that performs the send; the call returns the process handle.

    Args:
        - domain_name (str) : Domain name string; when set, it is resolved
          and overrides ``UDP_IP``
        - UDP_IP (str) : IP address YYY.YYY.YYY.YYY
        - UDP_PORT (int) : UDP port to send out on
        - sock_timeout (int) : Socket timeout in seconds (send only)
        - message (str) : Payload, encoded as UTF-8 before sending
    """
    setproctitle("parsl: Usage tracking")

    try:
        if message is None:
            raise ValueError("message was none")

        encoded_message = bytes(message, "utf-8")

        if encoded_message is None:
            raise ValueError("utf-8 encoding of message failed")

        if domain_name:
            try:
                UDP_IP = socket.gethostbyname(domain_name)
            except Exception:
                # DNS failure: fall back to the UDP_IP the caller provided.
                # (False, "Domain lookup failed, defaulting to {0}".format(UDP_IP))
                pass

        if UDP_IP is None:
            raise Exception("UDP_IP is None")

        if UDP_PORT is None:
            raise Exception("UDP_PORT is None")

        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
        sock.settimeout(sock_timeout)
        sock.sendto(encoded_message, (UDP_IP, UDP_PORT))
        sock.close()

    # All failures are logged at debug level only: usage tracking must
    # never break the user's workflow.
    except socket.timeout:
        logger.debug("Failed to send usage tracking data: socket timeout")
    except OSError as e:
        logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
    except Exception as e:
        logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
class UsageTracker:
    """Usage Tracking for Parsl.

    The server for this is here: https://github.com/Parsl/parsl_tracking
    This issue captures the discussion that went into functionality
    implemented here: https://github.com/Parsl/parsl/issues/34
    """

    def __init__(self, dfk, ip='52.3.111.203', port=50077,
                 domain_name='tracking.parsl-project.org'):
        """Initialize usage tracking unless the user has opted-out.

        We will try to resolve the hostname specified in kwarg:domain_name
        and if that fails attempt to use the kwarg:ip. Determining the
        IP and sending message is threaded to avoid slowing down DFK
        initialization.

        Tracks usage stats by inspecting the internal state of the dfk.

        Args:
            - dfk (DFK object) : Data Flow Kernel object

        KWargs:
            - ip (string) : IP address
            - port (int) : Port number, Default:50077
            - domain_name (string) : Domain name, will override IP
              Default: tracking.parsl-project.org
        """
        self.domain_name = domain_name
        self.ip = ip
        # The sock timeout will only apply to UDP send and not domain resolution
        self.sock_timeout = 5
        self.UDP_PORT = port
        self.UDP_IP = None
        # Handles of forked messenger processes; terminated in close().
        self.procs = []
        self.dfk = dfk
        self.config = self.dfk.config
        # Random id identifying this run in the tracking backend.
        self.uuid = str(uuid.uuid4())
        self.parsl_version = PARSL_VERSION
        self.python_version = "{}.{}.{}".format(sys.version_info.major,
                                                sys.version_info.minor,
                                                sys.version_info.micro)
        self.tracking_enabled = self.check_tracking_enabled()
        logger.debug("Tracking status: {}".format(self.tracking_enabled))
        self.initialized = False  # Once first message is sent this will be True

    def check_tracking_enabled(self):
        """Check if tracking is enabled.

        Tracking will be enabled unless either of these is true:
            1. dfk.config.usage_tracking is set to False
            2. Environment variable PARSL_TRACKING is set to false (case insensitive)
        """
        track = True

        if not self.config.usage_tracking:
            track = False

        envvar = str(os.environ.get("PARSL_TRACKING", True)).lower()
        if envvar == "false":
            track = False

        return track

    def construct_start_message(self):
        """Collect preliminary run info at the start of the DFK.

        Returns :
            - Message dict dumped as json string, ready for UDP
        """
        message = {'uuid': self.uuid,
                   'test': False,  # this field previously indicated if parsl
                                   # was being run in test mode, and is
                                   # retained for protocol compatibility
                   'parsl_v': self.parsl_version,
                   'python_v': self.python_version,
                   'os': platform.system(),
                   'os_v': platform.release(),
                   'start': time.time()}

        return json.dumps(message)

    def construct_end_message(self):
        """Collect the final run information at the time of DFK cleanup.

        Returns:
            - Message dict dumped as json string, ready for UDP
        """
        app_count = self.dfk.task_count

        site_count = len(self.dfk.config.executors)

        # Failures include both failed tasks and dependency failures.
        app_fails = self.dfk.task_state_counts[States.failed] + self.dfk.task_state_counts[States.dep_fail]

        message = {'uuid': self.uuid,
                   'end': time.time(),
                   't_apps': app_count,
                   'sites': site_count,
                   'c_time': None,
                   'failed': app_fails,
                   'test': False,  # see comment in construct_start_message
                   }

        return json.dumps(message)

    def send_UDP_message(self, message):
        """Send UDP message.

        Returns 0 when a send was attempted, -1 when tracking is disabled.
        """
        x = 0
        if self.tracking_enabled:
            try:
                # udp_messenger forks a child process; keep its handle so
                # close() can terminate it.
                proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)
                self.procs.append(proc)
            except Exception as e:
                logger.debug("Usage tracking failed: {}".format(e))
        else:
            x = -1

        return x

    def METHOD_NAME(self) -> float:
        """Send message over UDP.

        The first call sends the start message; every subsequent call
        sends the end message.

        Returns:
            time taken
        """
        start = time.time()
        message = None
        if not self.initialized:
            message = self.construct_start_message()
            self.initialized = True
        else:
            message = self.construct_end_message()

        self.send_UDP_message(message)
        end = time.time()

        return end - start

    def close(self):
        """We terminate (SIGTERM) the processes added to the self.procs list """
        for proc in self.procs:
            proc.terminate()
6,308 | run | import numpy as np
from scipy.constants import epsilon_0
from scipy.constants import mu_0
from SimPEG.electromagnetics.utils import k, omega
__all__ = ["MT_LayeredEarth"]
# Evaluate Impedance Z of a layer
def _ImpZ(f, mu, k):
    """Intrinsic impedance Z = omega(f) * mu / k of a layer
    (frequency ``f``, magnetic permeability ``mu``, wavenumber ``k``)."""
    return omega(f) * mu / k
# Complex Cole-Cole Conductivity - EM utils
def _PCC(siginf, m, t, c, f):
    """Complex Cole-Cole conductivity at frequency ``f``.

    ``siginf``: conductivity at infinite frequency; ``m``: chargeability;
    ``t``: time decay constant; ``c``: frequency exponent.
    """
    return siginf * (1.0 - (m / (1.0 + (1j * omega(f) * t) ** c)))
# matrix P relating Up and Down components with E and H fields
def _P(z):
return np.array([[1.0, 1.0], [-1.0 / z, 1.0 / z]])
def _Pinv(z):
return np.array([[1.0, -z], [1.0, z]]) / 2.0
# matrix T for transition of Up and Down components accross a layer
def _T(h, k):
return np.array([[np.exp(1j * k * h), 0.0], [0.0, np.exp(-1j * k * h)]])
def _Tinv(h, k):
return np.array([[np.exp(-1j * k * h), 0.0], [0.0, np.exp(1j * k * h)]])
# Propagate Up and Down component for a certain frequency & evaluate E and H field
def _Propagate(f, thickness, sig, chg, taux, c, mu_r, eps_r, n):
    """Propagate up/down-going plane-wave components through the layer stack
    at a single frequency ``f``.

    Parameters mirror :func:`MT_LayeredEarth`; ``n`` is the number of layers.

    Returns:
        (UD, EH, Z, K): up/down components, E/H fields, impedances and
        wavenumbers per interface; index 0 is the air halfspace.
    """
    # Expand scalar relative permittivity/permeability to one value per layer.
    if isinstance(eps_r, float):
        epsmodel = np.ones_like(sig) * eps_r
    else:
        epsmodel = eps_r

    if isinstance(mu_r, float):
        mumodel = np.ones_like(sig) * mu_r
    else:
        # BUG FIX: this branch previously assigned ``epsmodel = mu_r``,
        # clobbering the permittivity model and leaving ``mumodel``
        # undefined (NameError) whenever mu_r was given as an array.
        mumodel = mu_r

    # Complex (Cole-Cole) conductivities; plain conductivities when the
    # model is not chargeable.  (NumPy 2 removed the "complex_" alias,
    # so use the builtin ``complex`` dtype.)
    sigcm = np.zeros_like(sig, dtype=complex)
    if chg == 0.0 or taux == 0.0 or c == 0.0:
        sigcm = sig
    else:
        for j in range(1, len(sigcm)):
            sigcm[j] = _PCC(sig[j], chg[j], taux[j], c[j], f)

    # Prepend the air halfspace: zero conductivity, free-space mu/eps,
    # and a 120 km "thickness".
    sigcm = np.append(np.r_[0.0], sigcm)
    mu = np.append(np.r_[1.0], mumodel) * mu_0
    eps = np.append(np.r_[1.0], epsmodel) * epsilon_0
    H = np.append(np.r_[1.2 * (1e5)], thickness)

    K = k(f, sigcm, mu, eps)
    Z = _ImpZ(f, mu, K)

    EH = np.zeros((2, n + 1), dtype=complex)
    UD = np.zeros((2, n + 1), dtype=complex)
    # Boundary condition: unit down-going wave in the bottom halfspace.
    UD[1, -1] = 1.0

    # Propagate upward from the basement to the surface, renormalizing to
    # keep the exponentially growing terms from overflowing.  (The final
    # MT response only uses field ratios, so the scaling is harmless.)
    for i in range(-2, -(n + 2), -1):
        UD[:, i] = _Tinv(H[i + 1], K[i]) @ _Pinv(Z[i]) @ _P(Z[i + 1]) @ UD[:, i + 1]
        UD = UD / ((np.abs(UD[0, :] + UD[1, :])).max())

    # Convert up/down components into E and H fields at every interface.
    for j in range(0, n + 1):
        EH[:, j] = np.array([[1.0, 1.0], [-1.0 / Z[j], 1.0 / Z[j]]]) @ UD[:, j]

    return UD, EH, Z, K
# Utils to compute the apparent impedance over a layered Earth Model
def MT_LayeredEarth(
    freq,
    thickness,
    sig,
    return_type="Res-Phase",
    chg=0.0,
    tau=0.0,
    c=0.0,
    mu_r=1.0,
    eps_r=1.0,
):
    """
    This code compute the analytic response of a n-layered Earth to a plane wave (Magnetotellurics).
    All physical properties arrays convention describes the layers parameters from the top layer to the bottom layer.
    The solution is first developed in Ward and Hohmann 1988.
    See also https://em.geosci.xyz/content/maxwell3_fdem/natural_sources/MT_N_layered_Earth.html

    :param freq: the frequency at which we take the measurements
    :type freq: float or numpy.ndarray
    :param thickness: thickness of the Earth layers in meters, size is len(sig)-1. The last one is already considered infinite. For 1-layer Earth, thickness = None or 0.
    :type thickness: float or numpy.ndarray
    :param sig: electric conductivity of the Earth layers in S/m
    :type sig: float or numpy.ndarray
    :param str return_type: Output return_type. 'Res-Phase' returns apparent resistivity and Phase. 'Impedance' returns the complex Impedance
    :param numpy.ndarray chg: Cole-Cole Parameters for chargeable layers: chargeability
    :param numpy.ndarray tau: Cole-Cole Parameters for chargeable layers: time decay constant
    :param numpy.ndarray c: Cole-Cole Parameters for chargeable layers: geometric factor
    :param mu_r: relative magnetic permeability
    :type mu_r: float or numpy.ndarray
    :param eps_r: relative dielectric permittivity
    :type eps_r: float or numpy.ndarray
    :return: (apparent resistivity, phase in degrees) for 'Res-Phase', the
        complex impedance array for 'Impedance' (implicitly None otherwise)
    """
    # Normalize scalar inputs to 1-element arrays.
    if isinstance(freq, float):
        F = np.r_[freq]
    else:
        F = freq

    if isinstance(sig, float):
        sigmodel = np.r_[sig]
    else:
        sigmodel = sig

    # A thickness of None or 0.0 both mean a homogeneous halfspace
    # (no internal interfaces).
    if isinstance(thickness, float):
        if thickness == 0.0:
            thickmodel = np.empty(0)
        else:
            thickmodel = np.r_[thickness]
    elif thickness is None:
        thickmodel = np.empty(0)
    else:
        thickmodel = thickness

    # Count the number of layers
    nlayer = len(sigmodel)

    Res = np.zeros_like(F)
    Phase = np.zeros_like(F)
    App_ImpZ = np.zeros_like(F, dtype="complex_")

    for i in range(0, len(F)):
        _, EH, _, _ = _Propagate(
            F[i], thickmodel, sigmodel, chg, tau, c, mu_r, eps_r, nlayer
        )

        # Apparent impedance is the E/H ratio at the Earth's surface
        # (interface index 1; index 0 is the air halfspace).
        App_ImpZ[i] = EH[0, 1] / EH[1, 1]

        Res[i] = np.abs(App_ImpZ[i]) ** 2.0 / (mu_0 * omega(F[i]))
        Phase[i] = np.angle(App_ImpZ[i], deg=True)

    if return_type == "Res-Phase":
        return Res, Phase

    elif return_type == "Impedance":
        return App_ImpZ
def METHOD_NAME():
    """Smoke-test demo: evaluate MT_LayeredEarth on 1-, 2- and 3-layer models
    and print the results."""
    # nlayer=1
    F0 = 1.0
    H0 = None
    H01 = 0.0
    sign0 = 0.1

    # nlayer = 2
    F1 = np.r_[1e-5, 1e3]
    H1 = 200.0
    sign1 = np.r_[0.1, 1.0]

    # nlayer1 = 3
    F2 = 1e-3
    H2 = np.r_[200.0, 50.0]
    sign2 = np.r_[0.01, 1.0, 0.1]

    fm = "Impedance"

    # None and 0.0 thickness are both accepted for a homogeneous halfspace.
    Res, Phase = MT_LayeredEarth(F0, H0, sign0)
    print(Res, Phase)
    Res, Phase = MT_LayeredEarth(F0, H01, sign0)
    print(Res, Phase)
    Res, Phase = MT_LayeredEarth(F1, H1, sign1)
    print(Res, Phase)
    # Impedance mode returns the complex impedance instead of (Res, Phase).
    appimp = MT_LayeredEarth(F2, H2, sign2, return_type=fm)
    print(appimp)
# Run the demo when executed as a script.
if __name__ == "__main__":
    METHOD_NAME()
6,309 | open | """
Smart object module.
"""
from __future__ import absolute_import, unicode_literals
import contextlib
import logging
import io
import os
from psd_tools.constants import Tag
logger = logging.getLogger(__name__)
class SmartObject(object):
    """
    Smart object that represents embedded or external file.

    Smart objects are attached to
    :py:class:`~psd_tools.api.layers.SmartObjectLayer`.
    """

    def __init__(self, layer):
        # Layer-level configuration block (holds the unique id and warp data).
        self._config = None
        for key in (
            Tag.SMART_OBJECT_LAYER_DATA1, Tag.SMART_OBJECT_LAYER_DATA2
        ):
            if key in layer.tagged_blocks:
                self._config = layer.tagged_blocks.get_data(key)
                break

        # Document-level linked-layer record whose uuid matches this object.
        self._data = None
        for key in (
            Tag.LINKED_LAYER1, Tag.LINKED_LAYER2, Tag.LINKED_LAYER3,
            Tag.LINKED_LAYER_EXTERNAL
        ):
            if key in layer._psd.tagged_blocks:
                data = layer._psd.tagged_blocks.get_data(key)
                for item in data:
                    if item.uuid == self.unique_id:
                        self._data = item
                        break
            if self._data:
                break

    @property
    def kind(self):
        """Kind of the link, 'data', 'alias', or 'external'."""
        return self._data.kind.name.lower()

    @property
    def filename(self):
        """Original file name of the object."""
        return self._data.filename.strip('\x00')

    @contextlib.contextmanager
    def METHOD_NAME(self, external_dir=None):
        """
        Open the smart object as binary IO.

        :param external_dir: Path to the directory of the external file.
        :raises NotImplementedError: for 'alias' kind links.

        Example::

            with layer.smart_object.open() as f:
                data = f.read()
        """
        if self.kind == 'data':
            with io.BytesIO(self._data.data) as f:
                yield f
        elif self.kind == 'external':
            filepath = self._data.linked_file[b'fullPath'].value
            filepath = filepath.replace('\x00', '').replace('file://', '')
            if not os.path.exists(filepath):
                # Stale absolute path: fall back to the relative path,
                # optionally rooted at ``external_dir``.
                filepath = self._data.linked_file[b'relPath'].value
                filepath = filepath.replace('\x00', '')
                if external_dir is not None:
                    filepath = os.path.join(external_dir, filepath)
            # NOTE: inside the method body this bare name resolves to the
            # builtin ``open`` (method names are not in scope here).
            with METHOD_NAME(filepath, 'rb') as f:
                yield f
        else:
            raise NotImplementedError('alias is not supported.')

    @property
    def data(self):
        """Embedded file content, or empty if kind is `external` or `alias`"""
        if self.kind == 'data':
            return self._data.data
        else:
            with self.METHOD_NAME() as f:
                return f.read()

    @property
    def unique_id(self):
        """UUID of the object."""
        return self._config.data.get(b'Idnt').value.strip('\x00')

    @property
    def filesize(self):
        """File size of the object."""
        if self.kind == 'data':
            return len(self._data.data)
        return self._data.filesize

    @property
    def filetype(self):
        """Preferred file extension, such as `jpg`."""
        return self._data.filetype.lower().strip().decode('ascii')

    def is_psd(self):
        """Return True if the file is embedded PSD/PSB."""
        return self.filetype in ('8bpb', '8bps')

    @property
    def warp(self):
        """Warp parameters."""
        return self._config.data.get(b'warp')

    @property
    def resolution(self):
        """Resolution of the object."""
        return self._config.data.get(b'Rslt').value

    def save(self, filename=None):
        """
        Save the smart object to a file.

        :param filename: File name to export. If None, use the embedded name.
        """
        if filename is None:
            filename = self.filename
        # Bare name resolves to the builtin ``open``.
        with METHOD_NAME(filename, 'wb') as f:
            f.write(self.data)

    def __repr__(self):
        return "SmartObject(%r kind=%r type=%r size=%s)" % (
            self.filename, self.kind, self.filetype, self.filesize
        )
6,310 | shared pkgs dirs | import copy
import os
import pathlib
import platform
from typing import Any, Generator, Mapping
import pytest
from . import helpers
####################
# Config options #
####################
def pytest_addoption(parser):
    """Add command line arguments to pytest.

    Registers ``--mamba-pkgs-dir`` (reusable package cache) and
    ``--no-eager-clean`` (keep temporary folders for debugging).
    """
    parser.addoption(
        "--mamba-pkgs-dir",
        action="store",
        default=None,
        help="Package cache to reuse between tests",
    )
    parser.addoption(
        "--no-eager-clean",
        action="store_true",
        default=False,
        # BUG FIX: the adjacent string literals were previously concatenated
        # without separating spaces, producing garbled help text such as
        # "MAMBA_ROOT_PREFIXcreated during tests.These folders...".
        help=(
            "Do not eagerly delete temporary folders such as HOME and MAMBA_ROOT_PREFIX "
            "created during tests. "
            "These folders take a lot of disk space so we delete them eagerly. "
            "For debugging, it can be convenient to keep them. "
            "With this option, cleaning will fallback on the default pytest policy."
        ),
    )
##################
# Test fixture #
##################
@pytest.fixture(autouse=True)
def tmp_environ() -> Generator[Mapping[str, Any], None, None]:
    """Saves and restore environment variables.

    This is used for test that need to modify ``os.environ``.
    Yields the snapshot taken before the test ran.
    """
    old_environ = copy.deepcopy(os.environ)
    yield old_environ
    # Teardown: restore the pre-test snapshot.
    os.environ.clear()
    os.environ.update(old_environ)
@pytest.fixture
def tmp_home(
    request, tmp_environ, tmp_path_factory: pytest.TempPathFactory
) -> Generator[pathlib.Path, None, None]:
    """Change the home directory to a tmp folder for the duration of a test."""
    # Try multiple combination for Unix/Windows
    home_envs = ["HOME", "USERPROFILE"]
    used_homes = [env for env in home_envs if env in os.environ]

    new_home = pathlib.Path.home()
    if len(used_homes) > 0:
        new_home = tmp_path_factory.mktemp("home")
        new_home.mkdir(parents=True, exist_ok=True)
        for env in used_homes:
            os.environ[env] = str(new_home)

    if platform.system() == "Windows":
        # Windows applications resolve per-user data through these, not HOME.
        os.environ["APPDATA"] = str(new_home / "AppData" / "Roaming")
        os.environ["LOCALAPPDATA"] = str(new_home / "AppData" / "Local")

    yield new_home

    # Pytest would clean it automatically but this can be large (0.5 Gb for repodata)
    # We clean it explicitly
    if not request.config.getoption("--no-eager-clean"):
        try:
            helpers.rmtree(new_home)
        except PermissionError:
            # Files may still be held open (e.g. on Windows); defer to
            # pytest's own tmp-path cleanup.
            pass
@pytest.fixture
def tmp_clean_env(tmp_environ: None) -> None:
    """Remove all Conda/Mamba activation artifacts from environment."""
    # BUG FIX: iterate over a snapshot of the keys — deleting from
    # ``os.environ`` while iterating ``os.environ.items()`` raises
    # "RuntimeError: dictionary changed size during iteration".
    for name in list(os.environ):
        if name.startswith(("CONDA", "_CONDA", "MAMBA", "_MAMBA", "XDG_")):
            del os.environ[name]

    def keep_in_path(
        p: str, prefix: str | None = tmp_environ.get("CONDA_PREFIX")
    ) -> bool:
        """Return False for PATH entries that come from an activated env.

        ``prefix`` is captured at definition time from the pre-clean
        environment snapshot.
        """
        if "condabin" in p:
            return False
        # On windows, PATH is also used for dynamic libraries.
        if (prefix is not None) and (platform.system() != "Windows"):
            p = str(pathlib.Path(p).expanduser().resolve())
            prefix = str(pathlib.Path(prefix).expanduser().resolve())
            return not p.startswith(prefix)
        return True

    path_list = os.environ["PATH"].split(os.pathsep)
    path_list = [p for p in path_list if keep_in_path(p)]
    os.environ["PATH"] = os.pathsep.join(path_list)

# os.environ restored by tmp_clean_env and tmp_environ
@pytest.fixture(scope="session")
def tmp_pkgs_dirs(tmp_path_factory: pytest.TempPathFactory, request) -> pathlib.Path:
    """A common package cache for mamba downloads.

    The directory is not used automatically when calling this fixture.
    """
    if (p := request.config.getoption("--mamba-pkgs-dir")) is not None:
        # Reuse the user-provided cache directory across test sessions.
        p = pathlib.Path(p)
        p.mkdir(parents=True, exist_ok=True)
        return p

    return tmp_path_factory.mktemp("pkgs_dirs")
@pytest.fixture(params=[False])
def METHOD_NAME(request) -> bool:
    """A dummy fixture to control the use of shared package dir."""
    # Defaults to False; tests opt in by overriding the parametrization.
    return request.param
@pytest.fixture
def tmp_root_prefix(
    request,
    tmp_path_factory: pytest.TempPathFactory,
    tmp_clean_env: None,
    tmp_pkgs_dirs: pathlib.Path,
    METHOD_NAME: bool,
) -> Generator[pathlib.Path, None, None]:
    """Change the micromamba root directory to a tmp folder for the duration of a test."""
    new_root_prefix = tmp_path_factory.mktemp("mamba")
    new_root_prefix.mkdir(parents=True, exist_ok=True)
    os.environ["MAMBA_ROOT_PREFIX"] = str(new_root_prefix)

    if METHOD_NAME:
        # Opt-in: point the package cache at the session-wide shared dir.
        os.environ["CONDA_PKGS_DIRS"] = str(tmp_pkgs_dirs)

    yield new_root_prefix

    # Pytest would clean it automatically but this can be large (0.5 Gb for repodata)
    # We clean it explicitly
    if not request.config.getoption("--no-eager-clean"):
        if new_root_prefix.exists():
            helpers.rmtree(new_root_prefix)
    # os.environ restored by tmp_clean_env and tmp_environ
@pytest.fixture(params=[helpers.random_string])
def tmp_env_name(request) -> str:
    """Return the explicit or implicit parametrization."""
    # The default param is a callable, invoked to get a fresh name per test.
    if callable(request.param):
        return request.param()
    return request.param
@pytest.fixture
def tmp_empty_env(
    tmp_root_prefix: pathlib.Path, tmp_env_name: str
) -> Generator[pathlib.Path, None, None]:
    """An empty environment created under a temporary root prefix."""
    # no_dry_run: actually create the environment on disk.
    helpers.create("-n", tmp_env_name, no_dry_run=True)
    yield tmp_root_prefix / "envs" / tmp_env_name
@pytest.fixture
def tmp_prefix(tmp_empty_env: pathlib.Path) -> Generator[pathlib.Path, None, None]:
    """Change the conda prefix to a tmp folder for the duration of a test."""
    os.environ["CONDA_PREFIX"] = str(tmp_empty_env)
    yield tmp_empty_env
    # os.environ restored by tmp_environ through tmp_root_prefix
@pytest.fixture
def tmp_xtensor_env(tmp_prefix: pathlib.Path) -> Generator[pathlib.Path, None, None]:
    """An activated environment with Xtensor installed."""
    # no_dry_run: performs a real install (requires network access).
    helpers.install("-c", "conda-forge", "--json", "xtensor", no_dry_run=True)
    yield tmp_prefix
@pytest.fixture
def user_config_dir(tmp_home: pathlib.Path) -> Generator[pathlib.Path, None, None]:
    """Location of config files that are generated from mamba."""
    maybe_xdg_config = os.getenv("XDG_CONFIG_DIR", "")
    if maybe_xdg_config:
        yield pathlib.Path(maybe_xdg_config)
        # BUG FIX: without this ``return`` the generator resumed after
        # teardown and reached a second ``yield``, which pytest rejects
        # ("fixture function has more than one 'yield'").
        return
    system = platform.system()
    if system == "Linux" or system == "Darwin":
        yield tmp_home / ".config/mamba"
    elif system == "Windows":
        yield pathlib.Path(os.environ["APPDATA"]) / "mamba"
    else:
        raise RuntimeError(f"Unsupported system {system}")
@pytest.fixture
def user_data_dir(tmp_home: pathlib.Path) -> Generator[pathlib.Path, None, None]:
    """Location of data files that are generated from mamba."""
    maybe_xdg_data = os.getenv("XDG_DATA_DIR", "")
    if maybe_xdg_data:
        yield pathlib.Path(maybe_xdg_data)
        # BUG FIX: without this ``return`` the generator resumed after
        # teardown and reached a second ``yield``, which pytest rejects
        # ("fixture function has more than one 'yield'").
        return
    system = platform.system()
    if system == "Linux" or system == "Darwin":
        yield tmp_home / ".local/share/mamba"
    elif system == "Windows":
        yield pathlib.Path(os.environ["APPDATA"]) / "mamba"
    else:
        raise RuntimeError(f"Unsupported system {system}")
@pytest.fixture
def user_cache_dir(tmp_home: pathlib.Path) -> Generator[pathlib.Path, None, None]:
    """Location of cache files that are generated from mamba."""
    maybe_xdg_cache = os.getenv("XDG_CACHE_DIR", "")
    if maybe_xdg_cache:
        yield pathlib.Path(maybe_xdg_cache)
        # BUG FIX: without this ``return`` the generator resumed after
        # teardown and reached a second ``yield``, which pytest rejects
        # ("fixture function has more than one 'yield'").
        return
    system = platform.system()
    if system == "Linux" or system == "Darwin":
        yield tmp_home / ".cache/mamba"
    elif system == "Windows":
        yield pathlib.Path(os.environ["LOCALAPPDATA"]) / "mamba"
    else:
        raise RuntimeError(f"Unsupported system {system}")
6,311 | get root as int16 encoded xfb array | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: NetEncoding
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Int16EncodedXFBArray(object):
    # NOTE(review): FlatBuffers-generated accessor for a quantized int16
    # array (codes plus min/max/nbins dequantization parameters).
    # Prefer regenerating with ``flatc`` over hand-editing.
    __slots__ = ["_tab"]

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        """Create an accessor rooted at ``offset`` inside buffer ``buf``."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Int16EncodedXFBArray()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def METHOD_NAME(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # Int16EncodedXFBArray
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Int16EncodedXFBArray
    def Codes(self, j):
        # vtable offset 4: the int16 code vector; returns element ``j``
        # (0 when the field is absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(
                flatbuffers.number_types.Int16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2)
            )
        return 0

    # Int16EncodedXFBArray
    def CodesAsNumpy(self):
        # Zero-copy numpy view of the codes vector (0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int16Flags, o)
        return 0

    # Int16EncodedXFBArray
    def CodesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Int16EncodedXFBArray
    def CodesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # Int16EncodedXFBArray
    def Max(self):
        # vtable offset 6: float32 maximum (0.0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

    # Int16EncodedXFBArray
    def Min(self):
        # vtable offset 8: float32 minimum (0.0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

    # Int16EncodedXFBArray
    def Nbins(self):
        # vtable offset 10: int32 number of quantization bins (0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
# Module-level builder helpers generated by flatc.  The long
# ``Int16EncodedXFBArray*``-prefixed variants are deprecated aliases kept
# for backwards compatibility with older generated code.
def Start(builder):
    builder.StartObject(4)

def Int16EncodedXFBArrayStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)

def AddCodes(builder, codes):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(codes), 0)

def Int16EncodedXFBArrayAddCodes(builder, codes):
    """This method is deprecated. Please switch to AddCodes."""
    return AddCodes(builder, codes)

def StartCodesVector(builder, numElems):
    return builder.StartVector(2, numElems, 2)

def Int16EncodedXFBArrayStartCodesVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartCodesVector(builder, numElems)

def AddMax(builder, max):
    builder.PrependFloat32Slot(1, max, 0.0)

def Int16EncodedXFBArrayAddMax(builder, max):
    """This method is deprecated. Please switch to AddMax."""
    return AddMax(builder, max)

def AddMin(builder, min):
    builder.PrependFloat32Slot(2, min, 0.0)

def Int16EncodedXFBArrayAddMin(builder, min):
    """This method is deprecated. Please switch to AddMin."""
    return AddMin(builder, min)

def AddNbins(builder, nbins):
    builder.PrependInt32Slot(3, nbins, 0)

def Int16EncodedXFBArrayAddNbins(builder, nbins):
    """This method is deprecated. Please switch to AddNbins."""
    return AddNbins(builder, nbins)

def End(builder):
    return builder.EndObject()

def Int16EncodedXFBArrayEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
6,312 | relabel | #!/usr/bin/env python
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Derived from https://github.com/bryancatanzaro/kmeans
import argparse
from benchmark import parse_args, run_benchmark
def initialize(N, D, C, T):
    """Create N random D-dimensional data points and C random centroids,
    both of dtype T."""
    # For deterministic runs, seed the RNG here (e.g. np.random.seed(0)).
    def _draw(rows):
        return np.random.random((rows, D)).astype(T)

    # Points are random, so random centers are as good a starting guess as any.
    return _draw(N), _draw(C)
def calculate_distances(data, centroids, data_dots):
    """Squared Euclidean distance from every data point to every centroid.

    Uses ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, where ``data_dots``
    holds the precomputed ||x||^2 terms.  Returns an array of shape
    (n_points, n_centroids).
    """
    center_dots = np.square(np.linalg.norm(centroids, ord=2, axis=1))
    # Broadcast the two squared-norm vectors into the pairwise sum ...
    dists = data_dots[:, np.newaxis] + center_dots[np.newaxis, :]
    # ... then subtract twice the cross terms from a single matmul.
    dists -= 2.0 * (data @ centroids.T)
    return dists
def METHOD_NAME(pairwise_distances, data_index):
    """Assign each point to its nearest centroid.

    Returns (labels, distances): the argmin column per row and the matching
    (squared) distance for each point listed in ``data_index``.
    """
    nearest = pairwise_distances.argmin(axis=1)
    return nearest, pairwise_distances[data_index, nearest]
def find_centroids(data, labels, C, D):
    """Compute the new centroid of each of the C clusters.

    Points are grouped by sorting on their labels and cutting the sorted
    array at each cluster boundary; each segment is then summed and
    divided by its population.  Empty clusters are divided by 1 so their
    coordinates stay defined instead of becoming NaN.
    """
    order = np.argsort(labels)
    grouped = data[order]
    counts = np.bincount(labels, minlength=C)
    # Cumulative counts give the end index of each cluster's segment.
    boundaries = np.cumsum(counts)
    segments = np.split(grouped, boundaries)
    sums = np.empty((C, D), dtype=data.dtype)
    for cluster in range(C):
        sums[cluster, :] = segments[cluster].sum(axis=0)
    # Guard against divide-by-zero for clusters that received no points.
    safe_counts = np.maximum(counts, 1)
    return sums / safe_counts[:, np.newaxis]
def run_kmeans(C, D, T, I, N, S, benchmarking):  # noqa: E741
    """Run Lloyd's k-means on N random D-dimensional points.

    C: number of centroids; T: dtype; I: max iterations; S: iterations
    between convergence checks; benchmarking: when True, ignore the
    convergence threshold so a fixed workload is always measured.
    Returns the elapsed wall time in milliseconds.
    """
    print("Running kmeans...")
    print("Number of data points: " + str(N))
    print("Number of dimensions: " + str(D))
    print("Number of centroids: " + str(C))
    print("Max iterations: " + str(I))
    timer.start()
    data, centroids = initialize(N, D, C, T)
    data_dots = np.square(np.linalg.norm(data, ord=2, axis=1))
    # BUG FIX: the `np.int` alias was deprecated in NumPy 1.20 and removed
    # in 1.24; use the concrete np.int64 dtype for the row-index vector.
    data_index = np.linspace(0, N - 1, N, dtype=np.int64)
    labels = None
    iteration = 0
    prior_distance_sum = None
    # We run for max iterations or until we converge
    # We only test convergence every S iterations
    while iteration < I:
        pairwise_distances = calculate_distances(data, centroids, data_dots)
        new_labels, distances = METHOD_NAME(pairwise_distances, data_index)
        distance_sum = np.sum(distances)
        centroids = find_centroids(data, new_labels, C, D)
        if iteration > 0 and iteration % S == 0:
            changes = np.not_equal(labels, new_labels)
            total_changes = np.sum(changes)
            delta = distance_sum / prior_distance_sum
            print(
                "Iteration "
                + str(iteration)
                + " produced "
                + str(total_changes)
                + " changes, and total distance is "
                + str(distance_sum)
            )
            # We ignore the result of the threshold test in the case that we
            # are running performance benchmarks to measure performance for a
            # certain number of iterations
            if delta > 1 - 0.000001 and not benchmarking:
                print("Threshold triggered, terminating iterations early")
                break
        prior_distance_sum = distance_sum
        labels = new_labels
        iteration += 1
    # This final distance sum also synchronizes the results
    print(
        "Final distance sum at iteration "
        + str(iteration)
        + ": "
        + str(prior_distance_sum)
    )
    total = timer.stop()
    print("Elapsed Time: " + str(total) + " ms")
    return total
if __name__ == "__main__":
    # Command-line interface: all destinations mirror run_kmeans parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--centers",
        type=int,
        default=10,
        dest="C",
        help="number of centroids",
    )
    parser.add_argument(
        "-d",
        "--dims",
        type=int,
        default=2,
        dest="D",
        help="number of dimensions for each input data point",
    )
    parser.add_argument(
        "-m",
        "--max-iters",
        type=int,
        default=1000,
        dest="I",
        help="maximum number of iterations to run the algorithm for",
    )
    parser.add_argument(
        "-n",
        "--num",
        type=int,
        default=10,
        dest="N",
        help="number of elements in the data set in thousands",
    )
    parser.add_argument(
        "--precision",
        type=int,
        default=32,
        dest="P",
        help="precision of the computation in bits",
    )
    parser.add_argument(
        "-s",
        "--sample",
        type=int,
        default=25,
        dest="S",
        help="number of iterations between sampling the log likelihood",
    )
    # NOTE(review): parse_args also supplies the numpy-compatible module and
    # the timer, and presumably injects the `benchmark` option used below —
    # confirm against the benchmark helper module.
    args, np, timer = parse_args(parser)
    # Dispatch on requested floating-point precision (half/single/double).
    if args.P == 16:
        run_benchmark(
            run_kmeans,
            args.benchmark,
            "KMEANS(H)",
            (
                args.C,
                args.D,
                np.float16,
                args.I,
                args.N * 1000,
                args.S,
                args.benchmark > 1,
            ),
        )
    elif args.P == 32:
        run_benchmark(
            run_kmeans,
            args.benchmark,
            "KMEANS(S)",
            (
                args.C,
                args.D,
                np.float32,
                args.I,
                args.N * 1000,
                args.S,
                args.benchmark > 1,
            ),
        )
    elif args.P == 64:
        run_benchmark(
            run_kmeans,
            args.benchmark,
            "KMEANS(D)",
            (
                args.C,
                args.D,
                np.float64,
                args.I,
                args.N * 1000,
                args.S,
                args.benchmark > 1,
            ),
        )
    else:
        raise TypeError("Precision must be one of 16, 32, or 64")
6,313 | editions | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import subprocess
import sys
import llnl.util.tty as tty
from spack.package import *
class Catalyst(CMakePackage):
    """Catalyst is an in situ library, with an adaptable application
    programming interface (API), that orchestrates the alliance
    between simulation and analysis and/or visualization tasks. For
    versions 5.7 and greater use the paraview package.
    """
    homepage = "http://www.paraview.org"
    url = "https://www.paraview.org/files/v5.6/ParaView-v5.6.0.tar.xz"
    maintainers("danlipsa")
    version("5.6.0", sha256="5b49cb96ab78eee0427e25200530ac892f9a3da7725109ce1790f8010cb5b377")
    variant("python", default=False, description="Enable Python support")
    variant("essentials", default=False, description="Enable Essentials support")
    variant("extras", default=False, description="Enable Extras support. Implies Essentials.")
    variant(
        "rendering",
        default=True,
        description="Enable Rendering support. Implies Extras and Essentials.",
    )
    variant("osmesa", default=True, description="Use offscreen rendering")
    conflicts("+osmesa", when="~rendering")
    extends("python", when="+python")
    # VTK < 8.2.1 can't handle Python 3.8
    # This affects Paraview <= 5.7 (VTK 8.2.0)
    # https://gitlab.kitware.com/vtk/vtk/-/issues/17670
    depends_on("python@3:3.7", when="@:5.7 +python", type=("build", "run"))
    depends_on("python@3:", when="@5.8:+python", type=("build", "run"))
    depends_on("git", type="build")
    depends_on("mpi")
    depends_on("py-numpy", when="+python", type=("build", "run"))
    depends_on("py-mpi4py", when="+python", type=("build", "run"))
    depends_on("gl@3.2:", when="+rendering")
    depends_on("osmesa", when="+osmesa")
    depends_on("glx", when="~osmesa")
    depends_on("cmake@3.3:", type="build")
    @property
    def paraview_subdir(self):
        """The paraview subdirectory name as paraview-major.minor"""
        return "paraview-{0}".format(self.spec.version.up_to(2))
    @property
    def METHOD_NAME(self):
        """Transcribe spack variants into names of Catalyst Editions"""
        selected = ["Base"]  # Always required
        if "+python" in self.spec:
            selected.append("Enable-Python")
        if "+essentials" in self.spec:
            selected.append("Essentials")
        # Extras and Rendering pull in their prerequisite editions; the
        # resulting list may contain duplicates (e.g. "Essentials" twice).
        if "+extras" in self.spec:
            selected.append("Essentials")
            selected.append("Extras")
        if "+rendering" in self.spec:
            selected.append("Essentials")
            selected.append("Extras")
            selected.append("Rendering-Base")
        return selected
    def do_stage(self, mirror_only=False):
        """Unpacks and expands the fetched tarball.
        Then, generate the catalyst source files."""
        super().do_stage(mirror_only)
        # extract the catalyst part
        catalyst_script = os.path.join(self.stage.source_path, "Catalyst", "catalyze.py")
        editions_dir = os.path.join(self.stage.source_path, "Catalyst", "Editions")
        catalyst_source_dir = os.path.abspath(self.root_cmakelists_dir)
        # Run catalyze.py with the build-time python when available so the
        # generated sources match the python being built against.
        python_path = os.path.realpath(
            self.spec["python"].command.path if "+python" in self.spec else sys.executable
        )
        command = [
            python_path,
            catalyst_script,
            "-r",
            self.stage.source_path,
            "-o",
            catalyst_source_dir,
        ]
        for edition in self.METHOD_NAME:
            command.extend(["-i", os.path.join(editions_dir, edition)])
        # Only generate once; an existing output directory means a prior
        # stage already produced the catalyst sources.
        if not os.path.isdir(catalyst_source_dir):
            os.mkdir(catalyst_source_dir)
            subprocess.check_call(command)
            tty.msg("Generated catalyst source in %s" % self.stage.source_path)
        else:
            tty.msg("Already generated %s in %s" % (self.name, self.stage.source_path))
    def setup_run_environment(self, env):
        # paraview 5.5 and later
        # - cmake under lib/cmake/paraview-5.5
        # - libs under lib
        # - python bits under lib/python2.8/site-packages
        if os.path.isdir(self.prefix.lib64):
            lib_dir = self.prefix.lib64
        else:
            lib_dir = self.prefix.lib
        env.set("ParaView_DIR", self.prefix)
        env.prepend_path("LIBRARY_PATH", lib_dir)
        env.prepend_path("LD_LIBRARY_PATH", lib_dir)
        if "+python" in self.spec:
            python_version = self.spec["python"].version.up_to(2)
            env.prepend_path(
                "PYTHONPATH",
                join_path(lib_dir, "python{0}".format(python_version), "site-packages"),
            )
    def setup_dependent_build_environment(self, env, dependent_spec):
        env.set("ParaView_DIR", self.prefix)
    @property
    def root_cmakelists_dir(self):
        """The directory containing CMakeLists.txt.

        NOTE(review): despite the historical wording, this returns an
        absolute path (stage.source_path joined with the versioned
        Catalyst directory), not a path relative to the tarball root.
        :return: directory containing CMakeLists.txt
        """
        return os.path.join(self.stage.source_path, "Catalyst-v" + str(self.version))
    @property
    def build_directory(self):
        """Returns the directory to use when building the package
        :return: directory where to build the package
        """
        return join_path(os.path.abspath(self.root_cmakelists_dir), "spack-build")
    def cmake_args(self):
        """Populate cmake arguments for Catalyst."""
        spec = self.spec
        def variant_bool(feature, on="ON", off="OFF"):
            """Ternary for spec variant to ON/OFF string"""
            if feature in spec:
                return on
            return off
        def nvariant_bool(feature):
            """Negated ternary for spec variant to OFF/ON string"""
            return variant_bool(feature, on="OFF", off="ON")
        cmake_args = [
            "-DPARAVIEW_GIT_DESCRIBE=v%s" % str(self.version),
            "-DVTK_USE_SYSTEM_EXPAT:BOOL=ON",
            "-DVTK_USE_X:BOOL=%s" % nvariant_bool("+osmesa"),
            "-DVTK_USE_OFFSCREEN:BOOL=%s" % variant_bool("+osmesa"),
            "-DVTK_OPENGL_HAS_OSMESA:BOOL=%s" % variant_bool("+osmesa"),
        ]
        if "+python" in spec:
            cmake_args.extend(
                [
                    "-DPARAVIEW_ENABLE_PYTHON:BOOL=ON",
                    "-DPYTHON_EXECUTABLE:FILEPATH=%s" % spec["python"].command.path,
                    "-DVTK_USE_SYSTEM_MPI4PY:BOOL=ON",
                ]
            )
        else:
            cmake_args.append("-DPARAVIEW_ENABLE_PYTHON:BOOL=OFF")
        # Work around libpng NEON assembly issues on aarch64 Linux.
        if spec.platform == "linux" and spec.target.family == "aarch64":
            cmake_args.append("-DCMAKE_CXX_FLAGS=-DPNG_ARM_NEON_OPT=0")
            cmake_args.append("-DCMAKE_C_FLAGS=-DPNG_ARM_NEON_OPT=0")
        return cmake_args
    def cmake(self, spec, prefix):
        """Runs ``cmake`` in the build directory through the cmake.sh script"""
        cmake_script_path = os.path.join(os.path.abspath(self.root_cmakelists_dir), "cmake.sh")
        with working_dir(self.build_directory, create=True):
            subprocess.check_call(
                [cmake_script_path, os.path.abspath(self.root_cmakelists_dir)]
                + self.cmake_args()
                + self.std_cmake_args
            )
6,314 | narrow jsonpath node | import logging
from functools import (
lru_cache,
reduce,
)
from itertools import zip_longest
from typing import Optional
import jsonpath_ng
import jsonpath_ng.ext.filter
@lru_cache(maxsize=256)
def parse_jsonpath(jsonpath_expression: str) -> jsonpath_ng.JSONPath:
    """Parse a JSONPath expression (cached) and return a JSONPath object.

    The fast core parser is tried first; expressions containing a filter
    ("?") — or anything else the core parser rejects — fall back to the
    slower extended parser, which supports filters and more.
    """
    if "?" not in jsonpath_expression:
        try:
            # The regular parser is faster but does not support filters;
            # this branch succeeds most of the time.
            return jsonpath_ng.parse(jsonpath_expression)
        except Exception:
            # Something our precheck did not cover prevented the use of
            # the regular parser; fall through to the extended one.
            logging.warning(
                f"Unable to parse '{jsonpath_expression}' with the regular parser"
            )
    return jsonpath_ng.ext.parse(jsonpath_expression)
def METHOD_NAME(
    path_1: jsonpath_ng.JSONPath, path_2: jsonpath_ng.JSONPath
) -> jsonpath_ng.JSONPath:
    """
    given two jsonpath nodes, return the most specific and narrow one
    e.g. an index is more specific than a slice, a filter is more specific than
    a slice, etc.
    if the two nodes are not compatible, return None. e.g. a filter and an index
    might cover different sets of data, so not compatible

    Specificity order implemented below: Index > Filter > Slice, and a
    concrete field name is narrower than the "*" wildcard.
    """
    if path_1 == path_2:
        return path_1
    if isinstance(path_1, jsonpath_ng.Fields) and isinstance(
        path_2, jsonpath_ng.Fields
    ):
        # A wildcard field is narrowed by any concrete field; two distinct
        # concrete fields fall through and are treated as incompatible.
        if path_1.fields == ("*",):
            return path_2
        if path_2.fields == ("*",):
            return path_1
    elif isinstance(path_1, jsonpath_ng.Index) and isinstance(
        path_2, (jsonpath_ng.Slice, jsonpath_ng.ext.filter.Filter)
    ):
        return path_1
    elif isinstance(
        path_1, (jsonpath_ng.Slice, jsonpath_ng.ext.filter.Filter)
    ) and isinstance(path_2, jsonpath_ng.Index):
        return path_2
    elif isinstance(path_1, jsonpath_ng.ext.filter.Filter) and isinstance(
        path_2, jsonpath_ng.Slice
    ):
        return path_1
    elif isinstance(path_1, jsonpath_ng.Slice) and isinstance(
        path_2, jsonpath_ng.ext.filter.Filter
    ):
        return path_2
    # Not comparable (or equally-specific but different): no narrowing.
    return None
def sortable_jsonpath_string_repr(
    path: jsonpath_ng.JSONPath, index_padding: int = 5
) -> str:
    """Render *path* as a string usable purely as a sort key.

    Index nodes are left-padded with zeros (e.g. ``[00003]``) so that
    lexicographic order matches numeric order; filter nodes are dropped.
    The result is not necessarily a valid JSONPath expression, even if it
    occasionally looks like one — it exists only to produce sortable
    strings.
    """
    def render(node) -> Optional[str]:
        if isinstance(node, jsonpath_ng.Fields):
            return node.fields[0]
        if isinstance(node, jsonpath_ng.Index):
            return f"[{str(node.index).zfill(index_padding)}]"
        if isinstance(node, jsonpath_ng.Slice):
            return "*"
        # Unknown node kinds are silently skipped, as before.
        return None
    rendered = (render(n) for n in jsonpath_parts(path, ignore_filter=True))
    return ".".join(part for part in rendered if part is not None)
def jsonpath_parts(
    path: jsonpath_ng.JSONPath, ignore_filter: bool = False, ignore_root: bool = False
) -> list[jsonpath_ng.JSONPath]:
    """Decompose ``path`` into its chain of nodes, ordered left to right.

    ``Child`` nodes form a left-leaning chain, so we peel the rightmost
    node off repeatedly and reverse at the end.  Filter nodes can be
    skipped with ``ignore_filter``; the chain head (usually ``Root``) can
    be omitted with ``ignore_root``.
    """
    collected: list[jsonpath_ng.JSONPath] = []
    while isinstance(path, jsonpath_ng.Child):
        node = path.right
        path = path.left
        if ignore_filter and isinstance(node, jsonpath_ng.ext.filter.Filter):
            continue
        collected.append(node)
    parts = list(reversed(collected))
    if ignore_root and isinstance(path, jsonpath_ng.Root):
        return parts
    parts.insert(0, path)
    return parts
def apply_constraint_to_path(
    path: jsonpath_ng.JSONPath,
    path_constraint: jsonpath_ng.JSONPath,
    min_common_prefix_length: int = 1,
) -> Optional[jsonpath_ng.JSONPath]:
    """
    Narrow the `path` with a more specific `path_constraint`.
    e.g. if the path constraints a slice `[*]` and the constraints a
    specific index `[0]`, the `path` will be narrowed down to `[0]`.

    Walks both paths node-by-node; while they agree, the narrower of the
    two nodes is taken.  Once they diverge, the remainder of `path` is
    appended unchanged.  Returns None when fewer than
    `min_common_prefix_length` leading nodes could be matched.
    """
    prefix_path = jsonpath_ng.Root()
    common = True
    common_prefix_length = 0
    # zip_longest so a longer `path` keeps contributing nodes (p2) after
    # the constraint is exhausted (p1 is None, METHOD_NAME returns None).
    for p1, p2 in zip_longest(
        jsonpath_parts(path_constraint),
        jsonpath_parts(path),
    ):
        if common and (n := METHOD_NAME(p1, p2)):
            prefix_path = prefix_path.child(n)
            common_prefix_length += 1
        else:
            common = False
            if p2:
                prefix_path = prefix_path.child(p2)
            else:
                # The constraint is longer than the path; nothing left to add.
                break
    if common_prefix_length < min_common_prefix_length:
        return None
    return prefix_path
def remove_prefix_from_path(
    path: jsonpath_ng.JSONPath, prefix: jsonpath_ng.JSONPath
) -> Optional[jsonpath_ng.JSONPath]:
    """Strip ``prefix`` from the front of ``path``.

    Returns the remaining sub-path, or None when ``path`` is not prefixed
    by ``prefix`` or when nothing remains after stripping.
    """
    path_parts = jsonpath_parts(path, ignore_root=True)
    prefix_parts = jsonpath_parts(prefix, ignore_root=True)
    if len(prefix_parts) > len(path_parts):
        return None
    # The whole prefix must match the head of the path, element by element.
    if path_parts[: len(prefix_parts)] != prefix_parts:
        return None
    remainder = path_parts[len(prefix_parts):]
    if not remainder:
        return None
    # Re-chain the remaining nodes into a single JSONPath.
    return reduce(lambda left, right: left.child(right), remainder)
6,315 | combine std | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from functools import partial
import numpy as np
import pandas as pd
from .... import opcodes
from ....serialization.serializables import BoolField
from ..aggregation import BaseDataFrameExpandingAgg
# Bundle describing how a multi-stage (map/combine) aggregation is wired
# together: source frames, per-stage functions, and column bookkeeping.
_stage_info = namedtuple(
    "_stage_info",
    (
        "map_groups",
        "map_sources",
        "combine_sources",
        "combine_columns",
        "combine_funcs",
        "key_to_funcs",
        "valid_columns",
        "min_periods_func_name",
    ),
)
# NOTE(review): these helper-function names are not referenced in this
# module — presumably shared with the EWM aggregation implementation;
# confirm before removing.
_cum_alpha_coeff_func = "_cum_alpha_coeff"
_cum_square_alpha_coeff_func = "_cum_square_alpha_coeff"
def _add_pred_results(pred_results, local_results, axis=0):
    """Add the totals of predecessor chunks onto each local expanding result.

    For every (pred, local) pair, ``pred.sum(axis=axis)`` — the combined
    contribution of all preceding chunks — is added to the local result.
    NaNs in the local results are first filled with zeros so the addition
    does not propagate them.
    """
    if pred_results[0].ndim == 1:
        df_filler = 0
    else:
        # For DataFrames, build a zero-valued filler aligned to the
        # predecessor's last row so fillna keeps column alignment.
        df_filler = pred_results[0].iloc[-1, :].dropna()
        df_filler[:] = 0
    new_locals = []
    # add() needs the axis complementary to the one sum() collapsed.
    combine_axis = pred_results[0].ndim - axis - 1
    for pred_result, local_result in zip(pred_results, local_results):
        local_result = local_result.fillna(df_filler, axis=axis)
        new_locals.append(
            local_result.add(pred_result.sum(axis=axis), axis=combine_axis)
        )
    return new_locals
def _combine_arithmetic(pred_results, local_results, axis=0):
if pred_results is None:
return local_results[0]
return _add_pred_results(pred_results, local_results, axis=axis)[0]
def _combine_minmax(pred_results, local_results, axis=0, fun_name=None):
if pred_results is None:
return local_results[0]
pred_size = len(pred_results[0])
con = pd.concat([pred_results[0], local_results[0]], axis=axis)
result = con.expanding(axis=axis).agg(fun_name)
if result.ndim == 2:
return result.iloc[pred_size:, :] if axis == 0 else result.iloc[:, pred_size:]
else:
return result.iloc[pred_size:]
def _combine_mean(pred_results, local_results, axis=0):
local_sum_data, local_count_data = local_results
if pred_results is not None:
local_sum_data, local_count_data = _add_pred_results(
pred_results, local_results, axis=axis
)
return local_sum_data / local_count_data
def _combine_var(pred_results, local_results, axis=0):
    """Combine expanding variance from partial (sum, count, var) triples.

    Local variances are computed with ddof=0 (see _execute_map_function);
    recombination converts back to the unbiased ddof=1 estimate via
    var = (sum(x^2) - sum(x)^2 / n) / (n - 1).
    """
    local_sum_data, local_count_data, local_var_data = local_results
    if pred_results is None:
        # No predecessor chunks: just rescale ddof=0 -> ddof=1.
        return local_var_data * local_count_data / (local_count_data - 1)
    pred_sum_data, pred_count_data, pred_var_data = pred_results
    # Recover sum(x^2) on each side from var = sum(x^2)/n - (sum(x)/n)^2.
    local_sum_square = (
        local_count_data * local_var_data + local_sum_data**2 / local_count_data
    )
    pred_sum_square = (
        pred_count_data * pred_var_data + pred_sum_data**2 / pred_count_data
    )
    local_sum_square, local_sum_data, local_count_data = _add_pred_results(
        [pred_sum_square, pred_sum_data, pred_count_data],
        [local_sum_square, local_sum_data, local_count_data],
        axis=axis,
    )
    return (local_sum_square - local_sum_data**2 / local_count_data) / (
        local_count_data - 1
    )
def METHOD_NAME(pred_results, local_results, axis=0):
    """Combine expanding std as the square root of the combined variance."""
    combined_variance = _combine_var(pred_results, local_results, axis=axis)
    return np.sqrt(combined_variance)
class DataFrameExpandingAgg(BaseDataFrameExpandingAgg):
    """Tileable operand implementing chunked expanding-window aggregation."""
    _op_type_ = opcodes.EXPANDING_AGG
    # Whether the expanding window is centered (pandas `expanding` option).
    _center = BoolField("center")
    def __init__(self, center=None, **kw):
        super().__init__(_center=center, **kw)
    @property
    def center(self):
        return self._center
    @classmethod
    def _get_stage_functions(cls, op: "DataFrameExpandingAgg", func):
        """Map a user aggregation to (map-stage funcs, combine function)."""
        if func == "_data_count":
            return ["count"], _combine_arithmetic
        elif func in ("sum", "prod", "count"):
            return [func], _combine_arithmetic
        elif func in ("min", "max"):
            return [func], partial(_combine_minmax, fun_name=func)
        elif func == "mean":
            # mean is recombined from partial sums and counts.
            return ["sum", "count"], _combine_mean
        elif func in {"var", "std"}:
            # var/std need (sum, count, var) partials; see _combine_var.
            return (
                ["sum", "count", "var"],
                _combine_var if func == "var" else METHOD_NAME,
            )
        else:  # pragma: no cover
            raise NotImplementedError
    @classmethod
    def _execute_map_function(cls, op: "DataFrameExpandingAgg", func, in_data):
        """Run one partial aggregation on a chunk; also emit the chunk's
        last row as the summary passed to successor chunks when needed."""
        min_periods = 1 if op.min_periods > 0 else 0
        expanding = in_data.expanding(
            min_periods=min_periods, center=op.center, axis=op.axis
        )
        if func == "var":
            # ddof=0 here; _combine_var converts back to ddof=1 at the end.
            result = expanding.var(ddof=0)
        else:
            result = expanding.agg(func)
        if op.output_agg:
            summary = result.iloc[len(result) - 1 : len(result)]
        else:
            summary = None
        return result, summary
    @classmethod
    def _execute_combine_function(
        cls, op: "DataFrameExpandingAgg", func, pred_inputs, local_inputs, func_cols
    ):
        return func(pred_inputs, local_inputs, axis=op.axis)
    @classmethod
    def _execute_raw_function(cls, op: "DataFrameExpandingAgg", in_data):
        # Single-chunk fallback: run pandas' expanding agg directly.
        expanding = in_data.expanding(
            min_periods=op.min_periods, center=op.center, axis=op.axis
        )
        return expanding.agg(op.func)
6,316 | epidemics prefetch | import logging
import datetime
import json
import requests
from databank.models import PastCrisesEvent, PastEpidemic, Month
from .utils import catch_error, get_country_by_iso3
logger = logging.getLogger(__name__)
DISASTER_API = 'https://api.reliefweb.int/v1/disasters/'
RELIEFWEB_DATETIME_FORMAT = '%Y-%m-%d'
def parse_date(date):
    """Parse a ReliefWeb timestamp, keeping only its date part.

    Only works for reliefweb dates.
    For python >= 3.7 RELIEFWEB_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S%z'
    """
    day_part, _, _ = date.partition('T')
    return datetime.datetime.strptime(day_part, RELIEFWEB_DATETIME_FORMAT)
def _crises_event_prefetch():
    """Fetch past crisis disasters from the ReliefWeb API.

    Returns a dict mapping ISO2 country codes to lists of
    ``{'event', 'year', 'month'}`` records, paging through the API until
    no ``next`` link remains.  Countries that cannot be resolved from
    their ISO3 code are skipped.
    """
    query_params = json.dumps({
        'limit': 1000,
        'filter': {
            'operator': 'AND',
            'conditions': [
                {
                    'field': 'primary_type.code',
                    'value': [type_code for type_code, _ in PastCrisesEvent.CHOICES],
                    'operator': 'OR'
                }
            ]
        },
        'fields': {
            'include': ['date.created', 'primary_country.iso3', 'primary_type.code']
        }
    })
    url = DISASTER_API
    data = {}
    while True:
        response = requests.post(url, data=query_params)
        response.raise_for_status()
        response = response.json()
        for disaster in response['data']:
            disaster = disaster['fields']
            iso3 = disaster['primary_country']['iso3'].upper()
            pcountry = get_country_by_iso3(iso3)
            if pcountry is None:
                continue
            iso2 = pcountry.alpha_2
            dt = parse_date(disaster['date']['created'])
            disaster_data = {
                'event': disaster['primary_type']['code'],
                'year': dt.year,
                'month': dt.month,
            }
            if data.get(iso2) is None:
                data[iso2] = [disaster_data]
            else:
                data[iso2].append(disaster_data)
        # Follow API pagination until the last page.
        if 'next' not in response['links']:
            break
        url = response['links']['next']['href']
    return data
def METHOD_NAME():
    """Fetch past epidemic disasters (type code 'EP') from ReliefWeb.

    Returns a dict mapping ISO2 country codes to lists of
    ``{'epidemic', 'year', 'month'}`` records.  Only epidemics whose name
    contains one of ``PastEpidemic.CHOICES`` (simple substring search)
    are kept; unresolvable countries are skipped.
    """
    query_params = json.dumps({
        'limit': 1000,
        'filter': {
            'operator': 'AND',
            'conditions': [
                {
                    'field': 'primary_type.code',
                    'value': ['EP'],
                },
            ]
        },
        'fields': {
            'include': ['name', 'date.created', 'primary_country.iso3']
        }
    })
    url = DISASTER_API
    data = {}
    while True:
        response = requests.post(url, data=query_params)
        response.raise_for_status()
        response = response.json()
        for epidemic in response['data']:
            epidemic = epidemic['fields']
            iso3 = epidemic['primary_country']['iso3'].upper()
            pcountry = get_country_by_iso3(iso3)
            if pcountry is None:
                continue
            iso2 = pcountry.alpha_2
            dt = parse_date(epidemic['date']['created'])
            name = epidemic['name']
            selected_epidemic_type = None
            # Simple Text Search
            for epidemic_type, _ in PastEpidemic.CHOICES:
                if epidemic_type.lower() in name.lower():
                    selected_epidemic_type = epidemic_type
            if selected_epidemic_type is None:
                continue
            epidemic_data = {
                'epidemic': selected_epidemic_type,
                'year': dt.year,
                'month': dt.month,
            }
            if data.get(iso2) is None:
                data[iso2] = [epidemic_data]
            else:
                data[iso2].append(epidemic_data)
        if 'next' not in response['links']:
            break
        # BUG FIX: 'next' is an object like {'href': ...}; using the raw dict
        # as a URL crashed pagination on the second page.  Mirror
        # _crises_event_prefetch and follow the 'href' field.
        url = response['links']['next']['href']
    return data
@catch_error()
def prefetch():
    """Fetch both ReliefWeb datasets (crisis events and epidemics).

    NOTE(review): the (payload, item-count, source-url) tuple shape is
    presumed to match catch_error's contract — confirm in .utils.
    """
    crises_event = _crises_event_prefetch()
    epidemics = METHOD_NAME()
    return {
        'crises_event': crises_event,
        'epidemics': epidemics,
    }, len(crises_event) + len(epidemics), DISASTER_API
@catch_error()
def load(country, overview, relief_data):
    """Copy the prefetched ReliefWeb records for one country onto its
    overview object and save it.

    ``relief_data`` is the dict produced by ``prefetch``; countries with
    no ISO code (or when prefetch failed) are skipped.
    """
    if not country.iso or relief_data is None:
        return
    iso2 = country.iso.upper()
    # 1-based ids; month/event codes are resolved to display labels.
    overview.past_crises_events = [
        {
            'id': index + 1,
            'event': data['event'],
            'year': data['year'],
            'month': data['month'],
            'month_display': str(Month.LABEL_MAP.get(data['month'])),
            'event_display': str(PastCrisesEvent.LABEL_MAP.get(data['event'])),
        } for index, data in enumerate(relief_data['crises_event'].get(iso2) or [])
    ]
    overview.past_epidemics = [
        {
            'id': index + 1,
            'epidemic': data['epidemic'],
            'year': data['year'],
            'month': data['month'],
            'month_display': str(Month.LABEL_MAP.get(data['month'])),
            'event_display': str(PastEpidemic.LABEL_MAP.get(data['epidemic'])),
        } for index, data in enumerate(relief_data['epidemics'].get(iso2) or [])
    ]
    overview.save()
6,317 | set preferences | """User preference service.
Notes:
- Preferences are user-specific.
- For application settings use
:class:`abilian.services.settings.SettingsService`.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from flask import Blueprint, Flask, g, redirect, request, url_for
from flask_login import current_user
from werkzeug.exceptions import InternalServerError
from abilian.core import signals
from abilian.core.extensions import db
from abilian.core.models.subjects import User
from abilian.i18n import _, _l
from abilian.services.auth.service import user_menu
from abilian.services.base import Service, ServiceState
from abilian.services.preferences.panel import PreferencePanel
from abilian.web.action import Endpoint
from abilian.web.nav import BreadcrumbItem, NavItem
from .models import UserPreference
if TYPE_CHECKING:
from abilian.app import Application
# Navigation entry linking to the preferences pages; only shown to
# authenticated users.
_PREF_NAV_ITEM = NavItem(
    "user",
    "preferences",
    title=_l("Preferences"),
    icon="cog",
    url=lambda context: f"{request.url_root}preferences",
    condition=lambda context: not current_user.is_anonymous,
)
# Make "Preferences" the first entry of the user menu.
user_menu.insert(0, _PREF_NAV_ITEM)
class PreferenceState(ServiceState):
    """Per-application state held by :class:`PreferenceService`."""
    # Registered preference panels, in registration order.
    panels: list[PreferencePanel]
    blueprint: Blueprint
    # True once the blueprint has been attached to the Flask app;
    # panels cannot be added after that point.
    blueprint_registered: bool
    def __init__(self, service: PreferenceService, *args: Any, **kwargs: Any):
        super().__init__(service, *args, **kwargs)
        self.panels = []
        self.nav_paths = {}
        self.breadcrumb_items = {}
        self.blueprint_registered = False
class PreferenceService(Service):
    """Flask extension for a user-level preference service, with pluggable
    panels."""
    name = "preferences"
    AppStateClass = PreferenceState
    def init_app(self, app: Application, *panels: Any):
        super().init_app(app)
        with app.app_context():
            self.setup_blueprint(app)
            for panel in panels:
                self.register_panel(panel)
    def get_preferences(self, user: User | None = None) -> dict[str, Any]:
        """Return a string->value dictionary representing the given user
        preferences.
        If no user is provided, the current user is used instead.
        """
        if user is None:
            user = current_user
        return {pref.key: pref.value for pref in user.preferences}
    def METHOD_NAME(self, user: User = None, **kwargs: Any):
        """Set preferences from keyword arguments.

        Existing keys are updated in place; new keys create
        :class:`UserPreference` rows.  Defaults to the current user.
        """
        if user is None:
            user = current_user
        d = {pref.key: pref for pref in user.preferences}
        for k, v in kwargs.items():
            if k in d:
                d[k].value = v
            else:
                d[k] = UserPreference(user=user, key=k, value=v)
                db.session.add(d[k])
    def clear_preferences(self, user: User = None):
        """Clear the user preferences."""
        if user is None:
            user = current_user
        # don't delete UserPreference 1 by 1 with session.delete, else
        # user.preferences is not updated until commit() (not flush()). see
        # http://docs.sqlalchemy.org/en/rel_0_7/orm/session.html#deleting-from-collections
        user.preferences = []
    def register_panel(self, panel: PreferencePanel, app: Flask | None = None):
        """Register a panel and wire its GET/POST views onto the blueprint.

        Must be called before the blueprint is attached to the app.
        """
        state = self.app_state if app is None else app.extensions[self.name]
        if state.blueprint_registered:
            raise ValueError(
                "Extension already initialized for app, cannot add more panel"
            )
        state.panels.append(panel)
        panel.preferences = self
        rule = f"/{getattr(panel, 'path', panel.id)}"
        endpoint = panel.id
        abs_endpoint = f"preferences.{endpoint}"
        if hasattr(panel, "get"):
            state.blueprint.add_url_rule(rule, endpoint, panel.get)
        if hasattr(panel, "post"):
            endpoint += "_post"
            state.blueprint.add_url_rule(rule, endpoint, panel.post, methods=["POST"])
        state.breadcrumb_items[abs_endpoint] = BreadcrumbItem(
            label=panel.label, icon=None, url=Endpoint(abs_endpoint)
        )
    def setup_blueprint(self, app: Flask):
        """Create the /preferences blueprint, its menu/breadcrumb context
        processors, and the index redirect."""
        bp = self.app_state.blueprint = Blueprint(
            "preferences",
            __name__,
            template_folder="templates",
            url_prefix="/preferences",
        )
        # we need to delay blueprint registration to allow adding more panels during
        # initialization
        @signals.components_registered.connect_via(app)
        def register_bp(app: Flask):
            app.register_blueprint(bp)
            app.extensions[self.name].blueprint_registered = True
        self.app_state.root_breadcrumb_item = BreadcrumbItem(
            label=_("Preferences"), url=Endpoint("preferences.index")
        )
        bp.url_value_preprocessor(self.build_breadcrumbs)
        @bp.context_processor
        def inject_menu() -> dict[str, list[dict[str, Any]]]:
            # Build the side menu from accessible panels; mark the active one.
            menu = []
            for panel in self.app_state.panels:
                if not panel.is_accessible():
                    continue
                endpoint = f"preferences.{panel.id}"
                active = endpoint == request.endpoint
                entry = {
                    "endpoint": endpoint,
                    "label": panel.label,
                    "url": url_for(endpoint),
                    "active": active,
                }
                menu.append(entry)
            return {"menu": menu}
        @bp.route("/")
        def index():
            """Index redirects to the first accessible panel."""
            # Work around unit test failure. FIXME.
            if current_user.is_anonymous:
                return "OK"
            for panel in self.app_state.panels:
                if panel.is_accessible():
                    return redirect(url_for(f"preferences.{panel.id}"))
            # Should not happen.
            raise InternalServerError()
    def build_breadcrumbs(self, endpoint, view_args):
        # Runs on every request in the blueprint: root crumb plus the
        # current panel's crumb when one is registered for the endpoint.
        state = self.app_state
        g.nav["active"] = _PREF_NAV_ITEM.path
        g.breadcrumb.append(state.root_breadcrumb_item)
        if endpoint in state.breadcrumb_items:
            g.breadcrumb.append(state.breadcrumb_items[endpoint])
preferences = PreferenceService() |
6,318 | test deserialize vocab seen entries | import pickle
import pytest
from thinc.api import get_current_ops
import spacy
from spacy.lang.en import English
from spacy.strings import StringStore
from spacy.tokens import Doc
from spacy.util import ensure_path, load_model
from spacy.vectors import Vectors
from spacy.vocab import Vocab
from ..util import make_tempdir
test_strings = [([], []), (["rats", "are", "cute"], ["i", "like", "rats"])]
test_strings_attrs = [(["rats", "are", "cute"], "Hello")]
@pytest.mark.issue(599)
def test_issue599(en_vocab):
    """Dependency-annotation flag survives a Doc bytes round trip."""
    doc = Doc(en_vocab)
    doc2 = Doc(doc.vocab)
    doc2.from_bytes(doc.to_bytes())
    assert doc2.has_annotation("DEP")
@pytest.mark.issue(4054)
def test_issue4054(en_vocab):
    """Test that a new blank model can be made with a vocab from file,
    and that serialization does not drop the language at any point."""
    nlp1 = English()
    vocab1 = nlp1.vocab
    with make_tempdir() as d:
        # vocab -> disk -> vocab -> blank pipeline -> disk -> pipeline
        vocab_dir = ensure_path(d / "vocab")
        if not vocab_dir.exists():
            vocab_dir.mkdir()
        vocab1.to_disk(vocab_dir)
        vocab2 = Vocab().from_disk(vocab_dir)
        nlp2 = spacy.blank("en", vocab=vocab2)
        nlp_dir = ensure_path(d / "nlp")
        if not nlp_dir.exists():
            nlp_dir.mkdir()
        nlp2.to_disk(nlp_dir)
        nlp3 = load_model(nlp_dir)
        assert nlp3.lang == "en"
@pytest.mark.issue(4133)
def test_issue4133(en_vocab):
    """POS tags survive serializing the doc and vocab separately."""
    nlp = English()
    vocab_bytes = nlp.vocab.to_bytes()
    words = ["Apple", "is", "looking", "at", "buying", "a", "startup"]
    pos = ["NOUN", "VERB", "ADP", "VERB", "PROPN", "NOUN", "ADP"]
    doc = Doc(en_vocab, words=words)
    for i, token in enumerate(doc):
        token.pos_ = pos[i]
    # usually this is already True when starting from proper models instead of blank English
    doc_bytes = doc.to_bytes()
    vocab = Vocab()
    vocab = vocab.from_bytes(vocab_bytes)
    doc = Doc(vocab).from_bytes(doc_bytes)
    actual = []
    for token in doc:
        actual.append(token.pos_)
    assert actual == pos
@pytest.mark.parametrize("text", ["rat"])
def test_serialize_vocab(en_vocab, text):
    """A string hash resolves identically after a vocab bytes round trip."""
    text_hash = en_vocab.strings.add(text)
    vocab_bytes = en_vocab.to_bytes(exclude=["lookups"])
    new_vocab = Vocab().from_bytes(vocab_bytes)
    assert new_vocab.strings[text_hash] == text
    assert new_vocab.to_bytes(exclude=["lookups"]) == vocab_bytes
@pytest.mark.parametrize("strings1,strings2", test_strings)
def test_serialize_vocab_roundtrip_bytes(strings1, strings2):
    """Vocab bytes serialization is stable and preserves the string set."""
    vocab1 = Vocab(strings=strings1)
    vocab2 = Vocab(strings=strings2)
    vocab1_b = vocab1.to_bytes()
    vocab2_b = vocab2.to_bytes()
    # Equal string sets must serialize identically; different ones must not.
    if strings1 == strings2:
        assert vocab1_b == vocab2_b
    else:
        assert vocab1_b != vocab2_b
    vocab1 = vocab1.from_bytes(vocab1_b)
    assert vocab1.to_bytes() == vocab1_b
    new_vocab1 = Vocab().from_bytes(vocab1_b)
    assert new_vocab1.to_bytes() == vocab1_b
    assert len(new_vocab1.strings) == len(strings1)
    assert sorted([s for s in new_vocab1.strings]) == sorted(strings1)
@pytest.mark.parametrize("strings1,strings2", test_strings)
def test_serialize_vocab_roundtrip_disk(strings1, strings2):
    """Vocab disk serialization round-trips the string store."""
    vocab1 = Vocab(strings=strings1)
    vocab2 = Vocab(strings=strings2)
    with make_tempdir() as d:
        file_path1 = d / "vocab1"
        file_path2 = d / "vocab2"
        vocab1.to_disk(file_path1)
        vocab2.to_disk(file_path2)
        vocab1_d = Vocab().from_disk(file_path1)
        vocab2_d = Vocab().from_disk(file_path2)
        # check strings rather than lexemes, which are only reloaded on demand
        assert set(strings1) == set([s for s in vocab1_d.strings])
        assert set(strings2) == set([s for s in vocab2_d.strings])
        if set(strings1) == set(strings2):
            assert [s for s in vocab1_d.strings] == [s for s in vocab2_d.strings]
        else:
            assert [s for s in vocab1_d.strings] != [s for s in vocab2_d.strings]
@pytest.mark.parametrize("strings,lex_attr", test_strings_attrs)
def test_serialize_vocab_lex_attrs_bytes(strings, lex_attr):
vocab1 = Vocab(strings=strings)
vocab2 = Vocab()
vocab1[strings[0]].norm_ = lex_attr
assert vocab1[strings[0]].norm_ == lex_attr
assert vocab2[strings[0]].norm_ != lex_attr
vocab2 = vocab2.from_bytes(vocab1.to_bytes())
assert vocab2[strings[0]].norm_ == lex_attr
@pytest.mark.parametrize("strings,lex_attr", test_strings_attrs)
def METHOD_NAME(strings, lex_attr):
# Reported in #2153
vocab = Vocab(strings=strings)
vocab.from_bytes(vocab.to_bytes())
assert len(vocab.strings) == len(strings)
@pytest.mark.parametrize("strings,lex_attr", test_strings_attrs)
def test_serialize_vocab_lex_attrs_disk(strings, lex_attr):
vocab1 = Vocab(strings=strings)
vocab2 = Vocab()
vocab1[strings[0]].norm_ = lex_attr
assert vocab1[strings[0]].norm_ == lex_attr
assert vocab2[strings[0]].norm_ != lex_attr
with make_tempdir() as d:
file_path = d / "vocab"
vocab1.to_disk(file_path)
vocab2 = vocab2.from_disk(file_path)
assert vocab2[strings[0]].norm_ == lex_attr
@pytest.mark.parametrize("strings1,strings2", test_strings)
def test_serialize_stringstore_roundtrip_bytes(strings1, strings2):
sstore1 = StringStore(strings=strings1)
sstore2 = StringStore(strings=strings2)
sstore1_b = sstore1.to_bytes()
sstore2_b = sstore2.to_bytes()
if set(strings1) == set(strings2):
assert sstore1_b == sstore2_b
else:
assert sstore1_b != sstore2_b
sstore1 = sstore1.from_bytes(sstore1_b)
assert sstore1.to_bytes() == sstore1_b
new_sstore1 = StringStore().from_bytes(sstore1_b)
assert new_sstore1.to_bytes() == sstore1_b
assert set(new_sstore1) == set(strings1)
@pytest.mark.parametrize("strings1,strings2", test_strings)
def test_serialize_stringstore_roundtrip_disk(strings1, strings2):
sstore1 = StringStore(strings=strings1)
sstore2 = StringStore(strings=strings2)
with make_tempdir() as d:
file_path1 = d / "strings1"
file_path2 = d / "strings2"
sstore1.to_disk(file_path1)
sstore2.to_disk(file_path2)
sstore1_d = StringStore().from_disk(file_path1)
sstore2_d = StringStore().from_disk(file_path2)
assert set(sstore1_d) == set(sstore1)
assert set(sstore2_d) == set(sstore2)
if set(strings1) == set(strings2):
assert set(sstore1_d) == set(sstore2_d)
else:
assert set(sstore1_d) != set(sstore2_d)
@pytest.mark.parametrize("strings,lex_attr", test_strings_attrs)
def test_pickle_vocab(strings, lex_attr):
vocab = Vocab(strings=strings)
ops = get_current_ops()
vectors = Vectors(data=ops.xp.zeros((10, 10)), mode="floret", hash_count=1)
vocab.vectors = vectors
vocab[strings[0]].norm_ = lex_attr
vocab_pickled = pickle.dumps(vocab)
vocab_unpickled = pickle.loads(vocab_pickled)
assert vocab.to_bytes() == vocab_unpickled.to_bytes()
assert vocab_unpickled.vectors.mode == "floret" |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateLinkServicesForM365ComplianceCenterResult',
'AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult',
'get_private_link_services_for_m365_compliance_center',
'get_private_link_services_for_m365_compliance_center_output',
]
@pulumi.output_type
class GetPrivateLinkServicesForM365ComplianceCenterResult:
    """
    The description of the service.

    Generated output type: each field is validated and stored in __init__ and
    exposed read-only through a matching ``@pulumi.getter`` property.
    """
    def __init__(__self__, etag=None, id=None, identity=None, kind=None, location=None, name=None, properties=None, METHOD_NAME=None, tags=None, type=None):
        # Values arrive from the untyped RPC layer, so each argument is
        # type-checked (only when truthy) before being stored on the instance.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", METHOD_NAME)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        An etag associated with the resource, used for optimistic concurrency when editing it.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource identifier.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.ServicesResourceResponseIdentity']:
        """
        Setting indicating whether the service has a managed identity associated with it.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        The kind of the service.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ServicesPropertiesResponse':
        """
        The common properties of a service.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
        """
        Required property for system data
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult(GetPrivateLinkServicesForM365ComplianceCenterResult):
    """Awaitable variant of the result type, so callers can ``await`` it."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator, which is
        # what ``__await__`` must return; it resolves immediately to a copy.
        if False:
            yield self
        return GetPrivateLinkServicesForM365ComplianceCenterResult(
            etag=self.etag,
            id=self.id,
            identity=self.identity,
            kind=self.kind,
            location=self.location,
            name=self.name,
            properties=self.properties,
            METHOD_NAME=self.METHOD_NAME,
            tags=self.tags,
            type=self.type)
def get_private_link_services_for_m365_compliance_center(resource_group_name: Optional[str] = None,
                                                         resource_name: Optional[str] = None,
                                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult:
    """
    Get the metadata of a privateLinkServicesForM365ComplianceCenter resource.
    Azure REST API version: 2021-03-25-preview.

    :param str resource_group_name: The name of the resource group that contains the service instance.
    :param str resource_name: The name of the service instance.
    :param opts: Options controlling how the invoke is performed.
    :return: An awaitable result describing the service.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    # Merge caller options over the provider defaults before invoking.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:m365securityandcompliance:getPrivateLinkServicesForM365ComplianceCenter', __args__, opts=opts, typ=GetPrivateLinkServicesForM365ComplianceCenterResult).value

    return AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        kind=pulumi.get(__ret__, 'kind'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        METHOD_NAME=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_link_services_for_m365_compliance_center)
def get_private_link_services_for_m365_compliance_center_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateLinkServicesForM365ComplianceCenterResult]:
"""
Get the metadata of a privateLinkServicesForM365ComplianceCenter resource.
Azure REST API version: 2021-03-25-preview.
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
    ...
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "databricks access-connector delete",
)
class Delete(AAZCommand):
    """Delete the azure databricks accessConnector.

    :example: Delete a databricks accessConnector
        az databricks access-connector delete --resource-group MyResourceGroup --name MyAccessConnector
    """

    # Generated command metadata: API version and the ARM resource it targets.
    _aaz_info = {
        "version": "2022-10-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.databricks/accessconnectors/{}", "2022-10-01-preview"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True  # enables the --no-wait flag for this LRO command

    def _handler(self, command_args):
        """Entry point: run the delete as a long-running-operation poller."""
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None  # class-level cache built by _build_arguments_schema

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        """Define and cache the command's argument schema."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of the azure databricks accessConnector.",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                max_length=64,
                min_length=3,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        """Run the single HTTP operation between the pre/post callbacks."""
        self.pre_operations()
        yield self.AccessConnectorsDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Extension point for subclasses; intentionally empty.
        pass

    @register_callback
    def post_operations(self):
        # Extension point for subclasses; intentionally empty.
        pass

    class AccessConnectorsDelete(AAZHttpOperation):
        """The DELETE HTTP operation against the accessConnector resource."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 202 (accepted), 200 (in progress) and 204 (already gone) all
            # hand off to LRO polling via the azure-async-operation header.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.METHOD_NAME,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.METHOD_NAME,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.METHOD_NAME,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            """Resource URL with path parameters substituted."""
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/accessConnectors/{connectorName}",
                **self.METHOD_NAME
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def METHOD_NAME(self):
            """Serialized URL path parameters for the request."""
            parameters = {
                **self.serialize_url_param(
                    "connectorName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            """Serialized query-string parameters (api-version only)."""
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-10-01-preview",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # DELETE returns no body to deserialize on success.
            pass

        def on_204(self, session):
            # Resource already absent; nothing to do.
            pass
class _DeleteHelper:
    """Helper class for Delete; intentionally empty for this command."""
__all__ = ["Delete"] |
"""Miscellaneous background jobs."""
from __future__ import annotations
from collections import defaultdict
from functools import wraps
from typing import Callable
from typing_extensions import Protocol
import requests
from flask import g
from baseframe import statsd
from .. import app, rq
from ..extapi.boxoffice import Boxoffice
from ..extapi.explara import ExplaraAPI
from ..models import (
EmailAddress,
GeoName,
PhoneNumber,
Project,
ProjectLocation,
TicketClient,
db,
)
from ..signals import emailaddress_refcount_dropping, phonenumber_refcount_dropping
from ..typing import P, ResponseType, T_co
from .helpers import app_context
class RqJobProtocol(Protocol[P, T_co]):
    """Protocol for an RQ job function."""

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T_co:
        """Call the job synchronously, in-process."""
        ...

    # TODO: Replace return type with job id type
    def queue(self, *args: P.args, **kwargs: P.kwargs) -> None:
        """Enqueue the job for asynchronous execution by a worker."""
        ...

    # TODO: Add other methods and attrs (queue_name, schedule, cron, ...)
def rqjob(
    queue: str = 'funnel', **rqargs
) -> Callable[[Callable[P, T_co]], RqJobProtocol[P, T_co]]:
    """Decorate an RQ job so its body always runs inside an app context."""

    def apply(job_func: Callable[P, T_co]) -> RqJobProtocol[P, T_co]:
        @wraps(job_func)
        def run_with_context(*args: P.args, **kwargs: P.kwargs) -> T_co:
            with app_context():
                return job_func(*args, **kwargs)

        # rq.job supplies the .queue/.schedule machinery on the wrapper.
        return rq.job(queue, **rqargs)(run_with_context)

    return apply
@rqjob()
def import_tickets(ticket_client_id: int) -> None:
    """Import tickets for one TicketClient from its provider (Explara or Boxoffice)."""
    ticket_client = TicketClient.query.get(ticket_client_id)
    if ticket_client is not None:
        # Dispatch on provider name; unknown providers are silently skipped.
        if ticket_client.name.lower() == 'explara':
            ticket_list = ExplaraAPI(
                access_token=ticket_client.client_access_token
            ).get_tickets(ticket_client.client_eventid)
            ticket_client.import_from_list(ticket_list)
        elif ticket_client.name.lower() == 'boxoffice':
            ticket_list = Boxoffice(
                access_token=ticket_client.client_access_token
            ).get_tickets(ticket_client.client_eventid)
            ticket_client.import_from_list(ticket_list)
        # Commit whatever import_from_list staged on the session.
        db.session.commit()
@rqjob()
def tag_locations(project_id: int) -> None:
    """
    Tag a project with geoname locations.

    This function used to retrieve data from Hascore, which has been merged into Funnel
    and is available directly as the GeoName model. This code continues to operate with
    the legacy Hascore data structure, and is pending rewrite.
    """
    project = Project.query.get(project_id)
    if not project.location:
        return
    results = GeoName.parse_locations(
        project.location, special=["Internet", "Online"], bias=['IN', 'US']
    )
    # geonames: geonameid -> {'geonameid': ..., 'primary': bool}
    geonames = defaultdict(dict)
    # tokens: parsed fragments of the location string, with optional geoname info
    tokens = []
    for item in results:
        if 'geoname' in item:
            geoname = item['geoname'].as_dict(alternate_titles=False)
            geonames[geoname['geonameid']]['geonameid'] = geoname['geonameid']
            # First sighting of an id defaults to primary=True; related ids
            # added below are never primary.
            geonames[geoname['geonameid']]['primary'] = geonames[
                geoname['geonameid']
            ].get('primary', True)
            for gtype, related in geoname.get('related', {}).items():
                if gtype in ['admin2', 'admin1', 'country', 'continent']:
                    geonames[related['geonameid']]['geonameid'] = related['geonameid']
                    geonames[related['geonameid']]['primary'] = False
            tokens.append(
                {
                    'token': item.get('token', ''),
                    'geoname': {
                        'name': geoname['name'],
                        'geonameid': geoname['geonameid'],
                    },
                }
            )
        else:
            tokens.append({'token': item.get('token', '')})
    project.parsed_location = {'tokens': tokens}
    # Upsert ProjectLocation rows for every geoname found above.
    for locdata in geonames.values():
        loc = ProjectLocation.query.get((project_id, locdata['geonameid']))
        if loc is None:
            loc = ProjectLocation(project=project, geonameid=locdata['geonameid'])
            db.session.add(loc)
            db.session.flush()
        loc.primary = locdata['primary']
    # Remove locations that are no longer referenced by the parsed string.
    for location in project.locations:
        if location.geonameid not in geonames:
            db.session.delete(location)
    db.session.commit()
# TODO: Deprecate this method and the AuthClient notification system
@rqjob()
def send_auth_client_notice(url, params=None, data=None, method='POST'):
    """Notify an AuthClient over HTTP when some of its data changes."""
    requests.request(
        method,
        url,
        params=params,
        data=data,
        timeout=30,
    )
# If an email address had a reference count drop during the request, make a note of
# its email_hash, and at the end of the request, queue a background job. The job will
# call .refcount() and if it still has zero references, it will be marked as forgotten
# by having the email column set to None.
# It is possible for an email address to have its refcount drop and rise again within
# the request, so it's imperative to wait until the end of the request before attempting
# to forget it. Ideally, this job should wait even longer, for several minutes or even
# up to a day.
@emailaddress_refcount_dropping.connect
def METHOD_NAME(sender: EmailAddress) -> None:
    """Record an email hash whose refcount dropped, for end-of-request cleanup."""
    if not g:  # Only do this if we have an app context
        return
    hashes = getattr(g, 'forget_email_hashes', None)
    if hashes is None:
        hashes = g.forget_email_hashes = set()
    hashes.add(sender.email_hash)
@phonenumber_refcount_dropping.connect
def forget_phone_in_request_teardown(sender: PhoneNumber) -> None:
    """Record a phone hash whose refcount dropped, for end-of-request cleanup."""
    if not g:  # Only do this if we have an app context
        return
    hashes = getattr(g, 'forget_phone_hashes', None)
    if hashes is None:
        hashes = g.forget_phone_hashes = set()
    hashes.add(sender.phone_hash)
@app.after_request
def forget_email_phone_in_background_job(response: ResponseType) -> ResponseType:
    """Queue cleanup jobs for email/phone hashes flagged during this request."""
    for email_hash in getattr(g, 'forget_email_hashes', ()):
        forget_email.queue(email_hash)
    for phone_hash in getattr(g, 'forget_phone_hashes', ()):
        forget_phone.queue(phone_hash)
    return response
@rqjob()
def forget_email(email_hash: str) -> None:
    """Remove an email address if it still has no inbound references."""
    email_address = EmailAddress.get(email_hash=email_hash)
    # The refcount may have risen again since the request ended; re-check.
    if email_address is None or email_address.refcount() != 0:
        return
    app.logger.info("Forgetting email address with hash %s", email_hash)
    email_address.email = None
    db.session.commit()
    statsd.incr('email_address.forgotten')
@rqjob()
def forget_phone(phone_hash: str) -> None:
    """Remove a phone number if it still has no inbound references."""
    phone_number = PhoneNumber.get(phone_hash=phone_hash)
    # The refcount may have risen again since the request ended; re-check.
    if phone_number is None or phone_number.refcount() != 0:
        return
    app.logger.info("Forgetting phone number with hash %s", phone_hash)
    phone_number.mark_forgotten()
    db.session.commit()
    statsd.incr('phone_number.forgotten')
from __future__ import annotations
import asyncio
import ssl
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
AsyncIterator,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
import certifi
from aiohttp import BasicAuth, ClientError, ClientSession, FormData, TCPConnector
from aiohttp.hdrs import USER_AGENT
from aiohttp.http import SERVER_SOFTWARE
from aiogram.__meta__ import __version__
from aiogram.methods import TelegramMethod
from ...exceptions import TelegramNetworkError
from ...methods.base import TelegramType
from ...types import InputFile
from .base import BaseSession
if TYPE_CHECKING:
from ..bot import Bot
_ProxyBasic = Union[str, Tuple[str, BasicAuth]]
_ProxyChain = Iterable[_ProxyBasic]
_ProxyType = Union[_ProxyChain, _ProxyBasic]
def _retrieve_basic(basic: _ProxyBasic) -> Dict[str, Any]:
    """Parse one proxy spec (url, or (url, BasicAuth) pair) into connector kwargs."""
    from aiohttp_socks.utils import parse_proxy_url  # type: ignore

    if isinstance(basic, str):
        proxy_url, proxy_auth = basic, None
    else:
        proxy_url, proxy_auth = basic

    proxy_type, host, port, username, password = parse_proxy_url(proxy_url)
    # Explicit BasicAuth credentials take precedence over any embedded in the URL.
    if isinstance(proxy_auth, BasicAuth):
        username, password = proxy_auth.login, proxy_auth.password

    return {
        "proxy_type": proxy_type,
        "host": host,
        "port": port,
        "username": username,
        "password": password,
        "rdns": True,
    }
def _prepare_connector(chain_or_plain: _ProxyType) -> Tuple[Type["TCPConnector"], Dict[str, Any]]:
    """Choose a single- or chain-proxy connector class and its constructor kwargs."""
    from aiohttp_socks import (  # type: ignore
        ChainProxyConnector,
        ProxyConnector,
        ProxyInfo,
    )

    # since tuple is Iterable(compatible with _ProxyChain) object, we assume that
    # user wants chained proxies if tuple is a pair of string(url) and BasicAuth
    if isinstance(chain_or_plain, str) or (
        isinstance(chain_or_plain, tuple) and len(chain_or_plain) == 2
    ):
        chain_or_plain = cast(_ProxyBasic, chain_or_plain)
        return ProxyConnector, _retrieve_basic(chain_or_plain)

    # Anything else is treated as a chain of proxy specs.
    chain_or_plain = cast(_ProxyChain, chain_or_plain)
    infos: List[ProxyInfo] = []
    for basic in chain_or_plain:
        infos.append(ProxyInfo(**_retrieve_basic(basic)))

    return ChainProxyConnector, {"proxy_infos": infos}
class AiohttpSession(BaseSession):
    def __init__(self, proxy: Optional[_ProxyType] = None, **kwargs: Any) -> None:
        """Create a session, optionally routed through a (chain of) SOCKS/HTTP proxies."""
        super().__init__(**kwargs)
        self._session: Optional[ClientSession] = None
        # Connector class and kwargs; replaced when a proxy is configured.
        self._connector_type: Type[TCPConnector] = TCPConnector
        self._connector_init: Dict[str, Any] = {
            "ssl": ssl.create_default_context(cafile=certifi.where()),
        }
        self._should_reset_connector = True  # flag determines connector state
        self._proxy: Optional[_ProxyType] = None
        if proxy is not None:
            try:
                self._setup_proxy_connector(proxy)
            except ImportError as exc:  # pragma: no cover
                # aiohttp-socks is an optional dependency, required only for proxies.
                raise RuntimeError(
                    "In order to use aiohttp client for proxy requests, install "
                    "https://pypi.org/project/aiohttp-socks/"
                ) from exc
    def _setup_proxy_connector(self, proxy: _ProxyType) -> None:
        """Swap the connector class/kwargs for a proxy-aware one and remember the proxy."""
        self._connector_type, self._connector_init = _prepare_connector(proxy)
        self._proxy = proxy
    @property
    def proxy(self) -> Optional[_ProxyType]:
        """The configured proxy spec, or None when connecting directly."""
        return self._proxy
    @proxy.setter
    def proxy(self, proxy: _ProxyType) -> None:
        """Reconfigure the proxy; the connector is rebuilt on next create_session()."""
        self._setup_proxy_connector(proxy)
        self._should_reset_connector = True
    async def create_session(self) -> ClientSession:
        """Return the shared ClientSession, (re)creating it if closed or stale."""
        if self._should_reset_connector:
            await self.close()

        if self._session is None or self._session.closed:
            self._session = ClientSession(
                connector=self._connector_type(**self._connector_init),
                headers={
                    USER_AGENT: f"{SERVER_SOFTWARE} aiogram/{__version__}",
                },
            )
            self._should_reset_connector = False

        return self._session
    async def close(self) -> None:
        """Close the underlying ClientSession if one is open."""
        if self._session is not None and not self._session.closed:
            await self._session.close()
    def build_form_data(self, bot: Bot, method: TelegramMethod[TelegramType]) -> FormData:
        """Serialize a Telegram method call into multipart form data, with file uploads."""
        form = FormData(quote_fields=False)
        files: Dict[str, InputFile] = {}
        for key, value in method.model_dump(warnings=False).items():
            # prepare_value collects InputFile instances into `files` as a side effect.
            value = self.prepare_value(value, bot=bot, files=files)
            if not value:
                continue
            form.add_field(key, value)
        for key, value in files.items():
            form.add_field(
                key,
                value.read(bot),
                filename=value.filename or key,
            )
        return form
    async def make_request(
        self, bot: Bot, method: TelegramMethod[TelegramType], timeout: Optional[int] = None
    ) -> TelegramType:
        """POST a Telegram API method and return its parsed result.

        Raises TelegramNetworkError on timeouts or transport-level failures.
        """
        session = await self.create_session()

        url = self.api.api_url(token=bot.token, method=method.__api_method__)
        form = self.build_form_data(bot=bot, method=method)

        try:
            async with session.post(
                url, data=form, timeout=self.timeout if timeout is None else timeout
            ) as resp:
                raw_result = await resp.text()
        except asyncio.TimeoutError:
            raise TelegramNetworkError(method=method, message="Request timeout error")
        except ClientError as e:
            raise TelegramNetworkError(method=method, message=f"{type(e).__name__}: {e}")

        # resp is still bound after the context exits; only its body is consumed above.
        response = self.check_response(
            bot=bot, method=method, status_code=resp.status, content=raw_result
        )
        return cast(TelegramType, response.result)
    async def METHOD_NAME(
        self,
        url: str,
        headers: Optional[Dict[str, Any]] = None,
        timeout: int = 30,
        chunk_size: int = 65536,
        raise_for_status: bool = True,
    ) -> AsyncGenerator[bytes, None]:
        """Stream the body of `url` as chunks of at most `chunk_size` bytes."""
        if headers is None:
            headers = {}

        session = await self.create_session()

        async with session.get(
            url, timeout=timeout, headers=headers, raise_for_status=raise_for_status
        ) as resp:
            async for chunk in resp.content.iter_chunked(chunk_size):
                yield chunk
async def __aenter__(self) -> AiohttpSession:
await self.create_session()
        return self
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/pylint-dev/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/pylint-dev/pylint/blob/main/CONTRIBUTORS.txt
"""Utilities for creating diagrams."""
from __future__ import annotations
import argparse
import itertools
import os
from collections.abc import Iterable
from astroid import modutils, nodes
from pylint.pyreverse.diagrams import (
ClassDiagram,
ClassEntity,
DiagramEntity,
PackageDiagram,
PackageEntity,
)
from pylint.pyreverse.printer import EdgeType, NodeProperties, NodeType, Printer
from pylint.pyreverse.printer_factory import get_printer_for_filetype
from pylint.pyreverse.utils import is_exception
class DiagramWriter:
"""Base class for writing project diagrams."""
    def __init__(self, config: argparse.Namespace) -> None:
        """Initialize the writer from pyreverse's parsed command-line config."""
        self.config = config
        self.printer_class = get_printer_for_filetype(self.config.output_format)
        self.printer: Printer  # defined in set_printer
        self.file_name = ""  # defined in set_printer
        self.depth = self.config.max_color_depth
        # default colors are an adaption of the seaborn colorblind palette
        self.available_colors = itertools.cycle(self.config.color_palette)
        self.used_colors: dict[str, str] = {}
    def write(self, diadefs: Iterable[ClassDiagram | PackageDiagram]) -> None:
        """Write files for <project> according to <diadefs>."""
        for diagram in diadefs:
            # Derive a filesystem-safe file name from the diagram title.
            basename = diagram.title.strip().replace("/", "_").replace(" ", "_")
            file_name = f"{basename}.{self.config.output_format}"
            if os.path.exists(self.config.output_directory):
                file_name = os.path.join(self.config.output_directory, file_name)
            self.set_printer(file_name, basename)
            if isinstance(diagram, PackageDiagram):
                self.METHOD_NAME(diagram)
            else:
                self.write_classes(diagram)
            self.save()
    def METHOD_NAME(self, diagram: PackageDiagram) -> None:
        """Write a package diagram."""
        # module_info: fig_id -> {"imports": n, "imported": n} for the summary below.
        module_info: dict[str, dict[str, int]] = {}
        # sorted to get predictable (hence testable) results
        for module in sorted(diagram.modules(), key=lambda x: x.title):
            module.fig_id = module.node.qname()
            # --no-standalone: skip modules that take part in no dependency edge.
            if self.config.no_standalone and not any(
                module in (rel.from_object, rel.to_object)
                for rel in diagram.get_relationships("depends")
            ):
                continue
            self.printer.emit_node(
                module.fig_id,
                type_=NodeType.PACKAGE,
                properties=self.get_package_properties(module),
            )
            module_info[module.fig_id] = {
                "imports": 0,
                "imported": 0,
            }
        # package dependencies
        for rel in diagram.get_relationships("depends"):
            from_id = rel.from_object.fig_id
            to_id = rel.to_object.fig_id
            self.printer.emit_edge(
                from_id,
                to_id,
                type_=EdgeType.USES,
            )
            module_info[from_id]["imports"] += 1
            module_info[to_id]["imported"] += 1
        # NOTE(review): with no_standalone, a module skipped above could still
        # appear in a "type_depends" relationship, which would raise KeyError
        # here — worth confirming against the relationship invariants.
        for rel in diagram.get_relationships("type_depends"):
            from_id = rel.from_object.fig_id
            to_id = rel.to_object.fig_id
            self.printer.emit_edge(
                from_id,
                to_id,
                type_=EdgeType.TYPE_DEPENDENCY,
            )
            module_info[from_id]["imports"] += 1
            module_info[to_id]["imported"] += 1
        print(
            f"Analysed {len(module_info)} modules with a total "
            f"of {sum(mod['imports'] for mod in module_info.values())} imports"
        )
    def write_classes(self, diagram: ClassDiagram) -> None:
        """Write a class diagram."""
        # sorted to get predictable (hence testable) results
        for obj in sorted(diagram.objects, key=lambda x: x.title):
            obj.fig_id = obj.node.qname()
            # --no-standalone: skip classes that take part in no relationship.
            if self.config.no_standalone and not any(
                obj in (rel.from_object, rel.to_object)
                for rel_type in ("specialization", "association", "aggregation")
                for rel in diagram.get_relationships(rel_type)
            ):
                continue
            self.printer.emit_node(
                obj.fig_id,
                type_=NodeType.CLASS,
                properties=self.get_class_properties(obj),
            )
        # inheritance links
        for rel in diagram.get_relationships("specialization"):
            self.printer.emit_edge(
                rel.from_object.fig_id,
                rel.to_object.fig_id,
                type_=EdgeType.INHERITS,
            )
        # generate associations
        for rel in diagram.get_relationships("association"):
            self.printer.emit_edge(
                rel.from_object.fig_id,
                rel.to_object.fig_id,
                label=rel.name,
                type_=EdgeType.ASSOCIATION,
            )
        # generate aggregations
        for rel in diagram.get_relationships("aggregation"):
            self.printer.emit_edge(
                rel.from_object.fig_id,
                rel.to_object.fig_id,
                label=rel.name,
                type_=EdgeType.AGGREGATION,
            )
    def set_printer(self, file_name: str, basename: str) -> None:
        """Set printer: create a fresh printer for `basename` targeting `file_name`."""
        self.printer = self.printer_class(basename)
        self.file_name = file_name
def get_package_properties(self, obj: PackageEntity) -> NodeProperties:
"""Get label and shape for packages."""
return NodeProperties(
label=obj.title,
color=self.get_shape_color(obj) if self.config.colorized else "black",
)
    def get_class_properties(self, obj: ClassEntity) -> NodeProperties:
        """Get label and shape for classes."""
        properties = NodeProperties(
            label=obj.title,
            # --only-classnames hides members from the node body.
            attrs=obj.attrs if not self.config.only_classnames else None,
            methods=obj.methods if not self.config.only_classnames else None,
            # Exceptions are highlighted with red labels.
            fontcolor="red" if is_exception(obj.node) else "black",
            color=self.get_shape_color(obj) if self.config.colorized else "black",
        )
        return properties
    def get_shape_color(self, obj: DiagramEntity) -> str:
        """Get shape color."""
        qualified_name = obj.node.qname()
        # Standard-library entities are always grey.
        if modutils.is_stdlib_module(qualified_name.split(".", maxsplit=1)[0]):
            return "grey"
        # Determine the owning package: strip the class (and module) for classes,
        # keep the full name for packages, strip one level for plain modules.
        if isinstance(obj.node, nodes.ClassDef):
            package = qualified_name.rsplit(".", maxsplit=2)[0]
        elif obj.node.package:
            package = qualified_name
        else:
            package = qualified_name.rsplit(".", maxsplit=1)[0]
        # Truncate to the configured color depth so related packages share a color.
        base_name = ".".join(package.split(".", self.depth)[: self.depth])
        # Deliberately not dict.setdefault: next() must only be consumed when the
        # key is actually new, or colors from the cycle would be wasted.
        if base_name not in self.used_colors:
            self.used_colors[base_name] = next(self.available_colors)
        return self.used_colors[base_name]
def save(self) -> None:
"""Write to disk."""
        self.printer.generate(self.file_name)
import logging
from app_analytics.analytics_db_service import (
get_total_events_count,
get_usage_data,
)
from app_analytics.tasks import track_feature_evaluation
from app_analytics.track import track_feature_evaluation_influxdb
from django.conf import settings
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.fields import IntegerField
from rest_framework.generics import CreateAPIView, GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.serializers import Serializer
from telemetry.serializers import TelemetrySerializer
from environments.authentication import EnvironmentKeyAuthentication
from environments.permissions.permissions import EnvironmentKeyPermissions
from features.models import FeatureState
from organisations.models import Organisation
from .permissions import UsageDataPermission
from .serializers import (
UsageDataQuerySerializer,
UsageDataSerializer,
UsageTotalCountSerializer,
)
logger = logging.getLogger(__name__)
class SDKAnalyticsFlags(GenericAPIView):
    """
    Class to handle flag analytics events posted back by SDK clients.

    The request body is a mapping of feature name -> evaluation count for the
    environment identified by the environment key credentials.
    """

    permission_classes = (EnvironmentKeyPermissions,)
    authentication_classes = (EnvironmentKeyAuthentication,)

    def _get_environment_feature_names(self) -> set:
        """Names of features with environment-default states in the request's environment.

        Extracted so get_serializer_class and _is_data_valid build the queryset
        the same way instead of duplicating it.
        """
        return set(
            FeatureState.objects.filter(
                environment=self.request.environment,
                feature_segment=None,
                identity=None,
            ).values_list("feature__name", flat=True)
        )

    def get_serializer_class(self):
        # drf-yasg introspects views without a real request; return a bare
        # serializer in that case.
        if getattr(self, "swagger_fake_view", False):
            return Serializer

        environment_feature_names = self._get_environment_feature_names()

        class _AnalyticsSerializer(Serializer):
            # Fields are generated dynamically: one optional integer count
            # per feature known in this environment.
            def get_fields(self):
                return {
                    feature_name: IntegerField(required=False)
                    for feature_name in environment_feature_names
                }

        return _AnalyticsSerializer

    def METHOD_NAME(self, request, *args, **kwargs):
        """
        Send flag evaluation events from the SDK back to the API for reporting.
        """
        if not self._is_data_valid():
            # for now, return 200 to avoid breaking client integrations
            return Response(
                {"detail": "Invalid data. Not logged."},
                content_type="application/json",
                status=status.HTTP_200_OK,
            )
        if settings.USE_POSTGRES_FOR_ANALYTICS:
            track_feature_evaluation.delay(args=(request.environment.id, request.data))
        if settings.INFLUXDB_TOKEN:
            track_feature_evaluation_influxdb(request.environment.id, request.data)
        return Response(status=status.HTTP_200_OK)

    def _is_data_valid(self) -> bool:
        """Validate that every key is a known feature name and every value an int."""
        environment_feature_names = self._get_environment_feature_names()
        is_valid = True
        for feature_name, request_count in self.request.data.items():
            if not (
                isinstance(feature_name, str)
                and feature_name in environment_feature_names
            ):
                logger.warning("Feature %s does not belong to project", feature_name)
                is_valid = False
            if not isinstance(request_count, int):
                logger.error(
                    "Analytics data contains non integer request count. User agent: %s",
                    self.request.headers.get("User-Agent", "Not found"),
                )
                is_valid = False
        return is_valid
class SelfHostedTelemetryAPIView(CreateAPIView):
    """
    Class to handle telemetry events from self hosted APIs so we can aggregate and track
    self hosted installation data
    """
    # Telemetry is posted anonymously by self-hosted installations, so this
    # endpoint deliberately applies no authentication or permission checks.
    permission_classes = ()
    authentication_classes = ()
    serializer_class = TelemetrySerializer
@swagger_auto_schema(
    responses={200: UsageTotalCountSerializer()},
    methods=["GET"],
)
@api_view(["GET"])
@permission_classes([IsAuthenticated, UsageDataPermission])
def get_usage_data_total_count_view(request, organisation_pk=None):
    """Return the total number of analytics events recorded for an organisation."""
    organisation = Organisation.objects.get(id=organisation_pk)
    count = get_total_events_count(organisation)
    # Round-trip the count through the serializer so the response shape is validated.
    serializer = UsageTotalCountSerializer(data={"count": count})
    serializer.is_valid(raise_exception=True)
    return Response(serializer.data)
@swagger_auto_schema(
    query_serializer=UsageDataQuerySerializer(),
    responses={200: UsageDataSerializer()},
    methods=["GET"],
)
@api_view(["GET"])
@permission_classes([IsAuthenticated, UsageDataPermission])
def get_usage_data_view(request, organisation_pk=None):
    """Return usage data for an organisation, filtered by validated query params."""
    filters = UsageDataQuerySerializer(data=request.query_params)
    filters.is_valid(raise_exception=True)
    organisation = Organisation.objects.get(id=organisation_pk)
    usage_data = get_usage_data(organisation, **filters.data)
    serializer = UsageDataSerializer(usage_data, many=True)
    # Fixed: removed stray trailing "|" that made this line a syntax error.
    return Response(serializer.data)
6,325 | field | """
amplitude
=========
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class amplitude(Operator):
    """Computes amplitude of a real and an imaginary field.
    Parameters
    ----------
    fieldA : Field or FieldsContainer
        Field or fields container with only one field
        is expected
    fieldB : Field or FieldsContainer
        Field or fields container with only one field
        is expected
    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> # Instantiate operator
    >>> op = dpf.operators.math.amplitude()
    >>> # Make input connections
    >>> my_fieldA = dpf.Field()
    >>> op.inputs.fieldA.connect(my_fieldA)
    >>> my_fieldB = dpf.Field()
    >>> op.inputs.fieldB.connect(my_fieldB)
    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.math.amplitude(
    ...     fieldA=my_fieldA,
    ...     fieldB=my_fieldB,
    ... )
    >>> # Get output data
    >>> result_field = op.outputs.field()
    """
    # NOTE: autogenerated DPF operator class (see module docstring);
    # keep edits in sync with the generator rather than hand-tuning.
    def __init__(self, fieldA=None, fieldB=None, config=None, server=None):
        super().__init__(name="amplitude", config=config, server=server)
        self._inputs = InputsAmplitude(self)
        self._outputs = OutputsAmplitude(self)
        # Optionally connect both inputs eagerly at construction time.
        if fieldA is not None:
            self.inputs.fieldA.connect(fieldA)
        if fieldB is not None:
            self.inputs.fieldB.connect(fieldB)
    @staticmethod
    def _spec():
        # Pin specification: inputs 0/1 are the real/imaginary fields,
        # output 0 is the resulting amplitude field.
        description = """Computes amplitude of a real and an imaginary field."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                0: PinSpecification(
                    name="fieldA",
                    type_names=["field", "fields_container"],
                    optional=False,
                    document="""Field or fields container with only one field
        is expected""",
                ),
                1: PinSpecification(
                    name="fieldB",
                    type_names=["field", "fields_container"],
                    optional=False,
                    document="""Field or fields container with only one field
        is expected""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="field",
                    type_names=["field"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec
    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.
        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.
        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="amplitude", server=server)
    @property
    def inputs(self):
        """Enables to connect inputs to the operator
        Returns
        --------
        inputs : InputsAmplitude
        """
        return super().inputs
    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it
        Returns
        --------
        outputs : OutputsAmplitude
        """
        return super().outputs
class InputsAmplitude(_Inputs):
    """Intermediate class used to connect user inputs to
    amplitude operator.
    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.amplitude()
    >>> my_fieldA = dpf.Field()
    >>> op.inputs.fieldA.connect(my_fieldA)
    >>> my_fieldB = dpf.Field()
    >>> op.inputs.fieldB.connect(my_fieldB)
    """
    def __init__(self, op: Operator):
        super().__init__(amplitude._spec().inputs, op)
        # Pin 0 holds the real field, pin 1 the imaginary field.
        self._fieldA = Input(amplitude._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._fieldA)
        self._fieldB = Input(amplitude._spec().input_pin(1), 1, op, -1)
        self._inputs.append(self._fieldB)
    @property
    def fieldA(self):
        """Allows to connect fieldA input to the operator.
        Field or fields container with only one field
        is expected
        Parameters
        ----------
        my_fieldA : Field or FieldsContainer
        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.amplitude()
        >>> op.inputs.fieldA.connect(my_fieldA)
        >>> # or
        >>> op.inputs.fieldA(my_fieldA)
        """
        return self._fieldA
    @property
    def fieldB(self):
        """Allows to connect fieldB input to the operator.
        Field or fields container with only one field
        is expected
        Parameters
        ----------
        my_fieldB : Field or FieldsContainer
        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.amplitude()
        >>> op.inputs.fieldB.connect(my_fieldB)
        >>> # or
        >>> op.inputs.fieldB(my_fieldB)
        """
        return self._fieldB
class OutputsAmplitude(_Outputs):
    """Intermediate class used to get outputs from
    amplitude operator.
    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.amplitude()
    >>> # Connect inputs : op.inputs. ...
    >>> result_field = op.outputs.field()
    """
    def __init__(self, op: Operator):
        super().__init__(amplitude._spec().outputs, op)
        # Output pin 0 carries the computed amplitude field.
        self._field = Output(amplitude._spec().output_pin(0), 0, op)
        self._outputs.append(self._field)
    @property
    def METHOD_NAME(self):
        """Allows to get field output of the operator
        Returns
        ----------
        my_field : Field
        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.amplitude()
        >>> # Connect inputs : op.inputs. ...
        >>> result_field = op.outputs.field()
        """  # noqa: E501
        # Fixed: removed stray trailing "|" that made this line a syntax error.
        return self._field
6,326 | into orig type | # noqa E501: Ported from https://github.com/BUTSpeechFIT/speakerbeam/blob/main/src/models/adapt_layers.py
# Copyright (c) 2021 Brno University of Technology
# Copyright (c) 2021 Nippon Telegraph and Telephone corporation (NTT).
# All rights reserved
# By Katerina Zmolikova, August 2021.
from functools import partial
import torch
import torch.nn as nn
def make_adapt_layer(type, indim, enrolldim, ninputs=1):
    """Instantiate an adaptation layer by name.

    Args:
        type: key into ``adaptation_layer_types`` ("concat", "muladd" or "mul").
            (Name shadows the builtin but is kept for caller compatibility.)
        indim: feature dimension of the main network activations
        enrolldim: dimension of the enrollment embedding
        ninputs: number of activation streams to adapt

    Raises:
        ValueError: if ``type`` is not a known adaptation layer name.
    """
    adapt_class = adaptation_layer_types.get(type)
    if adapt_class is None:
        # Fail with a clear message instead of "'NoneType' object is not callable".
        raise ValueError(
            f"unknown adaptation layer type {type!r}; "
            f"expected one of {sorted(adaptation_layer_types)}"
        )
    return adapt_class(indim, enrolldim, ninputs)
def into_tuple(x):
    """Transforms tensor/list/tuple into tuple.

    Args:
        x: a ``torch.Tensor``, list or tuple

    Returns:
        tuple: ``x`` unchanged if already a tuple, ``tuple(x)`` for a list,
        and the one-element tuple ``(x,)`` for a tensor.

    Raises:
        ValueError: if ``x`` is none of the supported types.
    """
    if isinstance(x, list):
        return tuple(x)
    elif isinstance(x, torch.Tensor):
        return (x,)
    elif isinstance(x, tuple):
        return x
    else:
        # Fixed typo in the original message ("list of tuple").
        raise ValueError("x should be tensor, list or tuple")
def METHOD_NAME(x, orig_type):
    """Inverts into_tuple function.

    Args:
        x: tuple produced by ``into_tuple``
        orig_type: the original container type (``tuple``, ``list`` or
            ``torch.Tensor``)

    Returns:
        ``x`` converted back to ``orig_type``.

    Raises:
        ValueError: if ``orig_type`` is not one of the supported types.
    """
    if orig_type is tuple:
        return x
    if orig_type is list:
        return list(x)
    if orig_type is torch.Tensor:
        # into_tuple wrapped a bare tensor in a one-element tuple; unwrap it.
        return x[0]
    # `assert False` would be stripped under `python -O`; raise explicitly.
    raise ValueError(f"unsupported original type: {orig_type!r}")
class ConcatAdaptLayer(nn.Module):
    """Adaptation layer that concatenates the enrollment embedding onto the
    main activations along the channel dimension, then projects back with a
    per-stream Linear layer."""
    def __init__(self, indim, enrolldim, ninputs=1):
        super().__init__()
        self.ninputs = ninputs
        # One Linear per adapted stream, mapping (indim + enrolldim) -> indim.
        self.transform = nn.ModuleList(
            [nn.Linear(indim + enrolldim, indim) for _ in range(ninputs)]
        )
    def forward(self, main, enroll):
        """ConcatAdaptLayer forward.
        Args:
            main: tensor or tuple or list
                activations in the main neural network, which are adapted
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
            enroll: tensor or tuple or list
                embedding extracted from enrollment
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
        """
        assert type(main) == type(enroll)
        orig_type = type(main)
        main, enroll = into_tuple(main), into_tuple(enroll)
        assert len(main) == len(enroll) == self.ninputs
        out = []
        for transform, main0, enroll0 in zip(self.transform, main, enroll):
            # Indexing suggests main0 is (batch, channels, time) and enroll0
            # (batch, dim) broadcast over time — TODO confirm against callers.
            # The permutes move channels last so Linear acts on them, then back.
            out.append(
                transform(
                    torch.cat(
                        (main0, enroll0[:, :, None].expand(main0.shape)), dim=1
                    ).permute(0, 2, 1)
                ).permute(0, 2, 1)
            )
        return METHOD_NAME(tuple(out), orig_type)
class MulAddAdaptLayer(nn.Module):
    """Adaptation layer that scales (and optionally shifts) the main
    activations elementwise with the enrollment embedding."""
    def __init__(self, indim, enrolldim, ninputs=1, do_addition=True):
        super().__init__()
        self.ninputs = ninputs
        self.do_addition = do_addition
        # With addition, the embedding is split into a scale half and a shift
        # half, so it must be exactly twice the activation dimension.
        if do_addition:
            assert enrolldim == 2 * indim, (enrolldim, indim)
        else:
            assert enrolldim == indim, (enrolldim, indim)
    def forward(self, main, enroll):
        """MulAddAdaptLayer Forward.
        Args:
            main: tensor or tuple or list
                activations in the main neural network, which are adapted
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
            enroll: tensor or tuple or list
                embedding extracted from enrollment
                tuple/list may be useful when we want to apply the adaptation
                to both normal and skip connection at once
        """
        assert type(main) == type(enroll)
        orig_type = type(main)
        main, enroll = into_tuple(main), into_tuple(enroll)
        assert len(main) == len(enroll) == self.ninputs, (
            len(main),
            len(enroll),
            self.ninputs,
        )
        out = []
        for main0, enroll0 in zip(main, enroll):
            if self.do_addition:
                enroll0_mul, enroll0_add = torch.chunk(enroll0, 2, dim=1)
                # y = scale * x + shift, broadcast over the trailing axis.
                out.append(enroll0_mul[:, :, None] * main0 + enroll0_add[:, :, None])
            else:
                out.append(enroll0[:, :, None] * main0)
        return METHOD_NAME(tuple(out), orig_type)
# aliases for possible adaptation layer types
# (fixed: removed stray trailing "|" after the closing brace)
adaptation_layer_types = {
    "concat": ConcatAdaptLayer,
    "muladd": MulAddAdaptLayer,
    "mul": partial(MulAddAdaptLayer, do_addition=False),
}
6,327 | failed | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING
# Bokeh imports
from bokeh.application.handlers.code_runner import CodeRunner
from bokeh.application.handlers.handler import Handler
from bokeh.core.types import PathLike
from bokeh.io.doc import curdoc, set_curdoc
if TYPE_CHECKING:
from bokeh.document import Document
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
__all__ = (
"ExampleHandler",
)
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
class ExampleHandler(Handler):
    """A stripped-down handler similar to CodeHandler but that does
    some appropriate monkeypatching.
    """
    # Functions patched to no-ops while the example runs...
    _output_funcs = ["output_notebook", "output_file", "reset_output"]
    # ...and functions patched to add their first argument to curdoc instead.
    _io_funcs = ["show", "save"]
    def __init__(self, source: str, filename: PathLike) -> None:
        super().__init__()
        self._runner = CodeRunner(source, filename, [])
    def modify_document(self, doc: Document) -> None:
        """Execute the example source against ``doc`` with io functions patched."""
        if self.METHOD_NAME:
            return
        module = self._runner.new_module()
        doc.modules.add(module)
        orig_curdoc = curdoc()
        set_curdoc(doc)
        old_io, old_doc = self._monkeypatch()
        try:
            self._runner.run(module, lambda: None)
        finally:
            # Always restore the patched functions and the current document,
            # even if the example code raised.
            self._unmonkeypatch(old_io, old_doc)
            set_curdoc(orig_curdoc)
    def _monkeypatch(self):
        # Returns (old_io, old_doc) so _unmonkeypatch can restore them.
        def _pass(*args, **kw):
            pass
        def _add_root(obj, *args, **kw):
            curdoc().add_root(obj)
        def _curdoc(*args, **kw):
            return curdoc()
        # these functions are transitively imported from io into plotting,
        # so we have to patch them all. Assumption is that no other patching
        # has occurred, i.e. we can just save the funcs being patched once,
        # from io, and use those as the originals to replace everywhere
        import bokeh.io as io # lgtm [py/import-and-import-from]
        import bokeh.plotting as p
        mods = [io, p]
        old_io = {}
        for f in self._output_funcs + self._io_funcs:
            old_io[f] = getattr(io, f)
        for mod in mods:
            for f in self._output_funcs:
                setattr(mod, f, _pass)
            for f in self._io_funcs:
                setattr(mod, f, _add_root)
        import bokeh.document as d
        old_doc = d.Document
        d.Document = _curdoc
        return old_io, old_doc
    def _unmonkeypatch(self, old_io, old_doc):
        # Restore everything saved by _monkeypatch, in the same module set.
        import bokeh.io as io # lgtm [py/import-and-import-from]
        import bokeh.plotting as p
        mods = [io, p]
        for mod in mods:
            for f in old_io:
                setattr(mod, f, old_io[f])
        import bokeh.document as d
        d.Document = old_doc
    @property
    def METHOD_NAME(self) -> bool:
        # True when the wrapped CodeRunner failed to compile/run the source.
        return self._runner.METHOD_NAME
    @property
    def error(self) -> str | None:
        return self._runner.error
    @property
    def error_detail(self) -> str | None:
        return self._runner.error_detail
    @property
    def doc(self) -> str | None:
        return self._runner.doc
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# ----------------------------------------------------------------------------- |
6,328 | select tui | import os
import sys
import termios
import tty
from typing import Callable, List, Optional, Tuple
def buffered_print() -> Tuple[Callable, Callable]:
    """Return a (print, show) closure pair: the first accumulates string
    fragments in a shared buffer, the second flushes the whole buffer to
    stdout in a single call and empties it."""
    pending: List[str] = []

    def _accumulate(*args):
        pending.extend(args)

    def _flush():
        print("".join(pending), flush=True, end="")
        pending.clear()

    return _accumulate, _flush
# Allows for exactly one print per render, removing any weird flashing
# behavior and also speeding things up considerably
_print, _show = buffered_print()
def clear(k: int):
    """
    Clear `k` lines below the cursor, returning the cursor to its original position
    """
    # "\x1b[2K" erases a line, "\x1b[1E" steps down; "\x1b[kF" returns up k lines.
    erase_and_step = "\x1b[2K\x1b[1E" * k
    _print(erase_and_step + f"\x1b[{k}F")
def draw_box(
    ul_corner_pos: Tuple[int, int],
    height: int,
    width: int,
    color: Optional[str] = None,
):
    """Draw a rectangular box whose upper-left corner is at `ul_corner_pos`,
    optionally colored with the given ANSI color escape sequence."""
    if height <= 0 or width <= 0:
        return
    move_cursor(ul_corner_pos)
    # Trace the four edges clockwise; each call draws its leading corner.
    draw_horizontal_line(width, make_corner=True, color=color)
    draw_vertical_line(height, make_corner=True, color=color)
    draw_horizontal_line(width, left=True, make_corner=True, color=color)
    draw_vertical_line(height, up=True, make_corner=True, color=color)
def clear_screen():
    # ED (Erase in Display), parameter 2: clear the entire screen.
    _print("\x1b[2J")
def remove_cursor():
    # DECTCEM: hide the text cursor.
    _print("\x1b[?25l")
def reveal_cursor():
    # DECTCEM: show the text cursor again.
    _print("\x1b[?25h")
def move_cursor(pos: Tuple[int, int]):
    """
    Move the cursor to a given (x, y) coordinate
    """
    x, y = pos
    if x < 0 or y < 0:
        return
    # CUP: the escape sequence takes row (y) first, then column (x).
    _print(f"\x1b[{y};{x}H")
def move_cursor_up(n: int):
    # CUU: cursor up n rows; column unchanged.
    if n <= 0:
        return
    _print(f"\x1b[{n}A")
def line_up(n: int):
    """Moves to the start of the destination line"""
    if n <= 0:
        return
    _print(f"\x1b[{n}F")
def move_cursor_down(n: int):
    # CUD: cursor down n rows; column unchanged.
    if n <= 0:
        return
    _print(f"\x1b[{n}B")
def line_down(n: int):
    """Moves to the start of the destination line"""
    if n <= 0:
        return
    _print(f"\x1b[{n}E")
def move_cursor_right(n: int):
    # CUF: cursor forward n columns.
    if n <= 0:
        return
    _print(f"\x1b[{n}C")
def move_cursor_left(n: int):
    # CUB: cursor back n columns.
    if n <= 0:
        return
    _print(f"\x1b[{n}D")
def current_cursor_position() -> Tuple[int, int]:
    """Query the terminal for the cursor position and return it as (x, y),
    i.e. (column, row)."""
    res = b""
    # DSR (Device Status Report): the terminal replies b"\x1b[<row>;<col>R".
    sys.stdout.write("\x1b[6n")
    sys.stdout.flush()
    while not res.endswith(b"R"):
        res += sys.stdin.buffer.read(1)
    # strip() removes the escape/bracket/R framing bytes from both ends.
    y, x = res.strip(b"\x1b[R").split(b";")
    return int(x), int(y)
def draw_vertical_line(
    height: int,
    up: bool = False,
    make_corner: bool = False,
    color: Optional[str] = None,
):
    """
    Draws a vertical line with given `height`, going upwards if `up` is True
    and downwards otherwise.
    """
    if height <= 0:
        return
    if color is not None:
        _print(color)
    # After each glyph, step back one column and one row in the draw direction.
    sep = "\x1b[1A" if up else "\x1b[1B"
    for i in range(height):
        if i == 0 and make_corner:
            corner = "\u2514" if up else "\u2510"
            _print(f"{corner}\x1b[1D{sep}")
        else:
            _print(f"\u2502\x1b[1D{sep}")
    if color is not None:
        # Reset SGR attributes so subsequent output is uncolored.
        _print("\x1b[0m")
def draw_horizontal_line(
    width: int,
    left: bool = False,
    make_corner: bool = False,
    color: Optional[str] = None,
):
    """
    Draws a horizontal line with given `width`, going to the left if `left` is True
    and to the right otherwise.
    """
    if width <= 0:
        return
    if color is not None:
        _print(color)
    # Drawing leftwards requires stepping back two columns after each glyph.
    sep = "\x1b[2D" if left else ""
    for i in range(width):
        if i == 0 and make_corner:
            corner = "\u2518" if left else "\u250c"
            _print(f"{corner}{sep}")
        else:
            _print(f"\u2500{sep}")
    if color is not None:
        # Reset SGR attributes so subsequent output is uncolored.
        _print("\x1b[0m")
def read_next_byte() -> bytes:
    """Read a single byte from stdin, raising KeyboardInterrupt on quit keys."""
    b = sys.stdin.buffer.read(1)
    if b in (
        b"\x03", # CTRL C
        b"\x04", # CTRL D
        b"q",
        b"Q",
    ):
        raise KeyboardInterrupt
    return b
def read_bytes(num_bytes: int) -> bytes:
    """Read exactly `num_bytes` bytes from stdin via read_next_byte."""
    if num_bytes < 0:
        raise ValueError(f"cannot read {num_bytes} bytes")
    return b"".join(read_next_byte() for _ in range(num_bytes))
def METHOD_NAME(title: str, options: List[str], clear_terminal: bool = True):
    """
    Renders a terminal UI that allows users to select one of the options
    listed in `options`
    Args:
        title: The title of the selection window.
        options: A list of names for each of the options.
        clear_terminal: Whether or not to clear the entire terminal window
            before displaying - default False
    """
    # Fixed: removed stray trailing "|" after the final statement (syntax error).
    if len(options) == 0:
        raise ValueError("No options given")
    def render(
        curr_selected: int,
        start_index: int = 0,
        max_per_page: int = 10,
        indent: str = " ",
    ) -> int:
        # Draw the menu and return how many lines were emitted so the caller
        # can clear exactly that many before the next redraw.
        if curr_selected < 0 or curr_selected >= len(options):
            curr_selected = 0
        _print(title)
        line_down(2)
        num_lines_rendered = 4  # 4 "extra" lines for header + footer
        for i in range(start_index, start_index + max_per_page):
            if i >= len(options):
                break
            name = options[i]
            if i == curr_selected:
                # Highlight the selected entry in bold green.
                color = "\x1b[38;5;40m"
                bold = "\x1b[1m"
                reset = "\x1b[0m"
                _print(f"{indent}{color}{bold}{name}{reset}\x1b[1E")
            else:
                _print(f"{indent}{name}\x1b[1E")
            num_lines_rendered += 1
        line_down(1)
        control_str = "[ARROW-KEYS] Navigate\t[ENTER] Select\t[Q] Quit"
        _print(control_str)
        line_up(num_lines_rendered - 1)
        _show()
        return num_lines_rendered
    old_settings = termios.tcgetattr(sys.stdin.fileno())
    # Raw mode: arrow keys / enter are read byte-by-byte with no echo.
    tty.setraw(sys.stdin.fileno())
    curr_selected = 0
    start_index = 0
    _, term_height = os.get_terminal_size()
    remove_cursor()
    if not clear_terminal:
        _, curs_height = current_cursor_position()
        max_per_page = term_height - curs_height - 4
    else:
        clear_screen()
        move_cursor((0, 0))
        max_per_page = term_height - 4
    num_lines_rendered = render(
        curr_selected,
        start_index=start_index,
        max_per_page=max_per_page,
    )
    try:
        while True:
            b = read_bytes(1)
            if b == b"\r":
                return options[curr_selected]
            elif b == b"\x1b":
                b = read_bytes(2)
                if b == b"[A":  # Up Arrow
                    curr_selected = max(curr_selected - 1, 0)
                    # Scroll up once the selection crosses mid-page.
                    if (
                        curr_selected - start_index < max_per_page // 2
                        and start_index > 0
                    ):
                        start_index -= 1
                elif b == b"[B":  # Down Arrow
                    curr_selected = min(curr_selected + 1, len(options) - 1)
                    if (
                        curr_selected - start_index > max_per_page // 2
                        and start_index < len(options) - max_per_page
                    ):
                        start_index += 1
                else:
                    continue
                clear(num_lines_rendered)
                num_lines_rendered = render(
                    curr_selected,
                    start_index=start_index,
                    max_per_page=max_per_page,
                )
    except KeyboardInterrupt:
        ...
    finally:
        # Restore cursor visibility and the original terminal mode.
        clear(num_lines_rendered)
        reveal_cursor()
        _show()
        termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, old_settings)
6,329 | test omp props | # Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace import dtypes, nodes
from typing import Any, Dict, List, Union
import numpy as np
N = dace.symbol("N")
@dace.program
def arrayop(inp: dace.float32[N], out: dace.float32[N]):
    # Simple elementwise doubling map, used as the fixture SDFG for the tests.
    for i in dace.map[0:N]:
        out[i] = 2 * inp[i]
def key_exists(d: Union[List[Any], Dict[str, Any]], key: str):
    """Return True if `key` occurs as a dictionary key anywhere inside the
    nested list/dict structure `d`, searching recursively."""
    if isinstance(d, dict):
        return any(k == key or key_exists(v, key) for k, v in d.items())
    if isinstance(d, list):
        return any(key_exists(item, key) for item in d)
    # Scalars (strings, numbers, ...) cannot contain keys.
    return False
def test_lack_of_omp_props():
    """Sequential maps must not serialize any OpenMP-specific properties."""
    sdfg = arrayop.to_sdfg(simplify=True)
    for node, _ in sdfg.all_nodes_recursive():
        if isinstance(node, nodes.EntryNode):
            assert (isinstance(node, nodes.MapEntry))
            node.map.schedule = dtypes.ScheduleType.Sequential
            break
    json = sdfg.to_json()
    assert (not key_exists(json, 'omp_num_threads'))
    assert (not key_exists(json, 'omp_schedule'))
    assert (not key_exists(json, 'omp_chunk_size'))
def METHOD_NAME():
    """CPU_Multicore maps serialize OpenMP properties and emit matching pragmas."""
    sdfg = arrayop.to_sdfg(simplify=True)
    mapnode = None
    for node, _ in sdfg.all_nodes_recursive():
        if isinstance(node, nodes.EntryNode):
            assert (isinstance(node, nodes.MapEntry))
            mapnode = node.map
            break
    mapnode.schedule = dtypes.ScheduleType.CPU_Multicore
    json = sdfg.to_json()
    assert (key_exists(json, 'omp_num_threads'))
    assert (key_exists(json, 'omp_schedule'))
    assert (key_exists(json, 'omp_chunk_size'))
    # Each property set below must be reflected in the generated pragma.
    code = sdfg.generate_code()[0].clean_code
    assert ("#pragma omp parallel for" in code)
    mapnode.omp_num_threads = 10
    code = sdfg.generate_code()[0].clean_code
    assert ("#pragma omp parallel for num_threads(10)" in code)
    mapnode.omp_schedule = dtypes.OMPScheduleType.Guided
    code = sdfg.generate_code()[0].clean_code
    assert ("#pragma omp parallel for schedule(guided) num_threads(10)" in code)
    mapnode.omp_chunk_size = 5
    code = sdfg.generate_code()[0].clean_code
    assert ("#pragma omp parallel for schedule(guided, 5) num_threads(10)" in code)
def test_omp_parallel():
    """CPU_Persistent maps emit `#pragma omp parallel` and honor omp_num_threads."""
    @dace.program
    def tester(A: dace.float64[1]):
        for t in dace.map[0:1] @ dace.ScheduleType.CPU_Persistent:
            A[0] += 1
    sdfg = tester.to_sdfg()
    me = next(n for n, _ in sdfg.all_nodes_recursive() if isinstance(n, dace.nodes.MapEntry))
    me.map.omp_num_threads = 2
    code = sdfg.generate_code()[0].clean_code
    assert ("#pragma omp parallel num_threads(2)" in code)
    a = np.random.rand(1)
    # Both threads execute the body, so A[0] is incremented twice.
    ref = a + 2
    sdfg(a)
    assert np.allclose(a, ref)
def test_omp_parallel_for_in_parallel():
    """
    Tests that an OpenMP map inside a parallel section ends up without an
    extra (semantically-incorrect) ``parallel`` statement.
    """
    @dace.program
    def tester(A: dace.float64[20]):
        for t in dace.map[0:1] @ dace.ScheduleType.CPU_Persistent:
            for i in dace.map[0:20] @ dace.ScheduleType.CPU_Multicore:
                A[i] += 1
    sdfg = tester.to_sdfg()
    code = sdfg.generate_code()[0].clean_code
    # The inner map must reuse the enclosing parallel region: "omp for",
    # not a nested "omp parallel for".
    assert "#pragma omp parallel" in code
    assert "#pragma omp for" in code
    a = np.random.rand(20)
    ref = a + 1
    sdfg(a)
    assert np.allclose(a, ref)
def test_omp_get_tid():
    """A map parameter used in the body maps to omp_get_thread_num() calls."""
    @dace.program
    def tester(A: dace.float64[20]):
        for t in dace.map[0:1] @ dace.ScheduleType.CPU_Persistent:
            A[t] += 1
    sdfg = tester.to_sdfg()
    me = next(n for n, _ in sdfg.all_nodes_recursive() if isinstance(n, dace.nodes.MapEntry))
    me.map.omp_num_threads = 2
    code = sdfg.generate_code()[0].clean_code
    assert "#pragma omp parallel num_threads(2)" in code
    assert "omp_get_thread_num()" in code
    a = np.random.rand(20)
    ref = np.copy(a)
    # Only thread ids 0 and 1 exist, so only the first two entries change.
    ref[:2] += 1
    sdfg(a)
    assert np.allclose(a, ref)
def test_omp_get_tid_elision():
    """The thread-id query is elided when the map parameter is unused."""
    @dace.program
    def tester(A: dace.float64[20]):
        for t in dace.map[0:1] @ dace.ScheduleType.CPU_Persistent:
            A[0] += 1
    sdfg = tester.to_sdfg()
    code = sdfg.generate_code()[0].clean_code
    assert "omp_get_thread_num()" not in code
def test_omp_get_ntid():
    """A symbolic range over the thread count maps to omp_get_num_threads()."""
    __omp_num_threads = dace.symbol('__omp_num_threads')
    @dace.program
    def tester(A: dace.int64[1]):
        for _ in dace.map[0:__omp_num_threads] @ dace.ScheduleType.CPU_Persistent:
            A[0] = __omp_num_threads
    sdfg = tester.to_sdfg()
    code = sdfg.generate_code()[0].clean_code
    assert "omp_get_num_threads()" in code
    me = next(n for n, _ in sdfg.all_nodes_recursive() if isinstance(n, dace.nodes.MapEntry))
    me.map.omp_num_threads = 3
    a = np.zeros([1], dtype=np.int64)
    sdfg(a, __omp_num_threads=1) # Feed in some other value
    # The runtime thread count (3) wins over the symbol fed in at call time.
    assert np.allclose(a, 3)
if __name__ == "__main__":
    # Fixed: removed stray trailing "|" after the last call (syntax error).
    test_lack_of_omp_props()
    METHOD_NAME()
    test_omp_parallel()
    test_omp_parallel_for_in_parallel()
    test_omp_get_tid()
    test_omp_get_tid_elision()
    test_omp_get_ntid()
6,330 | train | #!/usr/bin/env python
# coding=utf-8
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Original source: https://github.com/pytorch/examples/blob/master/mnist/main.py
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
class Net(nn.Module):
    """LeNet-style CNN for 28x28 MNIST digits: two conv/pool stages followed
    by two fully connected layers, returning per-class log-probabilities."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # Stage 1: 28x28 -> conv(5) -> 24x24 -> pool(2) -> 12x12
        x = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        # Stage 2: 12x12 -> conv(5) -> 8x8 -> pool(2) -> 4x4
        x = F.max_pool2d(F.relu(self.conv2(x)), 2, 2)
        # Flatten the 50 feature maps of size 4x4 into one vector per sample.
        flat = x.view(-1, 4 * 4 * 50)
        logits = self.fc2(F.relu(self.fc1(flat)))
        return F.log_softmax(logits, dim=1)
def METHOD_NAME(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch over `train_loader`, logging progress every
    `args.log_interval` batches."""
    model.METHOD_NAME()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # Negative log-likelihood pairs with the model's log_softmax output.
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
    """Evaluate `model` on `test_loader`, printing average loss and accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def main():
    """Parse CLI arguments, download MNIST, train for `--epochs` epochs, and
    evaluate after each epoch; optionally save the trained weights."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=5, metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    # pin_memory/num_workers only help when transferring batches to a GPU.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', METHOD_NAME=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', METHOD_NAME=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        METHOD_NAME(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
    if (args.save_model):
        torch.save(model.state_dict(),"mnist_cnn.pt")
if __name__ == '__main__':
    # Fixed: removed stray trailing "|" after the call (syntax error).
    main()
6,331 | set state |
# Copyright 2009-2015 Jaap Karssenberg <jaap.karssenberg@gmail.com>
# Tests: search gui.TestDialogs.testSearchDialog
from gi.repository import Gtk
from gi.repository import GObject
import logging
from zim.notebook import Path
from zim.gui.widgets import Dialog, BrowserTreeView, InputEntry, ErrorDialog, ScrolledWindow, StatusPage
from zim.gui.pageview import FIND_REGEX
from zim.search import *
logger = logging.getLogger('zim.gui.searchdialog')
class SearchDialog(Dialog):
    """Dialog that searches the notebook and lists matching pages.

    Drives a small state machine (READY / SEARCHING / DONE / CANCELLED)
    that toggles the find/cancel buttons, the spinner and the result stack.
    """

    # States for set_state()
    READY = 0
    SEARCHING = 1
    DONE = 2
    CANCELLED = 3

    def __init__(self, widget, notebook, page, navigation):
        Dialog.__init__(self, widget, _('Search'), # T: Dialog title
            buttons=Gtk.ButtonsType.CLOSE, help='Help:Searching',
            defaultwindowsize=(400, 300)
        )
        self.page = page

        hbox = Gtk.HBox(spacing=5)
        self.vbox.pack_start(hbox, False, True, 0)
        search_label = Gtk.Label.new_with_mnemonic(_('_Search') + ': ')
        hbox.pack_start(search_label, False, True, 0) # T: input label
        self.query_entry = InputEntry()
        hbox.add(self.query_entry)
        search_label.set_mnemonic_widget(self.query_entry)
        self.search_button = Gtk.Button.new_with_mnemonic(_('_Find')) # T: Button label
        hbox.pack_start(self.search_button, False, True, 0)
        self.spinner = Gtk.Spinner()
        hbox.pack_start(self.spinner, False, True, 0)
        self.cancel_button = Gtk.Button.new_with_mnemonic(_('_Cancel')) # T: Button label
        hbox.pack_start(self.cancel_button, False, True, 0)

        help_text = _(
            'For advanced search you can use operators like\n'
            'AND, OR and NOT. See the help page for more details.'
        ) # T: help text for the search dialog
        self.query_entry.set_tooltip_text(help_text)

        self.namespacecheckbox = Gtk.CheckButton.new_with_mnemonic(_('_Limit search to the current page and sub-pages'))
            # T: checkbox option in search dialog
        # Only offer the "limit to current page" option when a page is given
        if page is not None:
            self.vbox.pack_start(self.namespacecheckbox, False, True, 0)

        # TODO advanced query editor
        # TODO checkbox _('Match c_ase')
        # TODO checkbox _('Whole _word')

        self.results_treeview = SearchResultsTreeView(notebook, navigation)
        self._stack = Gtk.Stack()
        for name, widget in (
            ('ready', StatusPage('edit-find-symbolic', None)),
            ('searching', StatusPage('edit-find-symbolic', _('Searching ...'))), # T: placeholder label when search has started
            ('no-results', StatusPage('edit-find-symbolic', _('No results'))), # T: placeholder label when search has no results
            ('results', ScrolledWindow(self.results_treeview)),
        ):
            widget.show_all()
            self._stack.add_named(widget, name)
        self.vbox.pack_start(self._stack, True, True, 0)

        self.search_button.connect_object('clicked', self.__class__._search, self)
        self.cancel_button.connect_object('clicked', self.__class__._cancel, self)
        self.query_entry.connect_object('activate', self.__class__._search, self)

        self.set_state(self.READY)

    def search(self, query):
        '''Trigger a search to be performed.
        Because search can take a long time to execute it is best to
        call this method after the dialog is shown.

        @param query: the query as string
        '''
        self.query_entry.set_text(query)
        self._search()

    def _search(self):
        string = self.query_entry.get_text()
        if self.namespacecheckbox.get_active():
            assert self.page is not None
            string = 'Section: "%s" ' % self.page.name + string
        #~ print('!! QUERY: ' + string)

        self.results_treeview.hasresults = False # XXX reset state before starting new search
        self.set_state(self.SEARCHING)
        try:
            self.results_treeview.search(string)
        except Exception as error:
            ErrorDialog(self, error).run()

        if not self.results_treeview.cancelled:
            self.set_state(self.DONE)
        else:
            self.set_state(self.CANCELLED)

    def _cancel(self):
        # The treeview polls this flag from its search callback
        self.results_treeview.cancelled = True

    def set_state(self, state):
        # Update buttons, spinner and the visible stack page for the new state

        def hide(button):
            button.hide()
            button.set_no_show_all(True)

        def show(button):
            button.set_no_show_all(False)
            button.show_all()

        if state in (self.READY, self.DONE, self.CANCELLED):
            self.query_entry.set_sensitive(True)
            hide(self.cancel_button)
            if self.spinner:
                self.spinner.stop()
                hide(self.spinner)
            show(self.search_button)
            if state == self.READY:
                self._stack.set_visible_child_name('ready')
            elif self.results_treeview.hasresults:
                self._stack.set_visible_child_name('results')
            else:
                self._stack.set_visible_child_name('no-results')
        elif state == self.SEARCHING:
            self.query_entry.set_sensitive(False)
            hide(self.search_button)
            if self.spinner:
                show(self.spinner)
                self.spinner.start()
            show(self.cancel_button)
            if self.results_treeview.hasresults:
                self._stack.set_visible_child_name('results')
            else:
                self._stack.set_visible_child_name('searching')
        else:
            assert False, 'BUG: invalid state'
class SearchResultsTreeView(BrowserTreeView):
    """Tree view listing search results as (page name, score) rows.

    Runs the actual search via a SearchSelection and updates the model
    incrementally from the search callback.
    """

    # Model column indices
    NAME_COL = 0
    SCORE_COL = 1
    PATH_COL = 2

    def __init__(self, notebook, navigation):
        model = Gtk.ListStore(str, int, object)
            # NAME_COL, SCORE_COL, PATH_COL
        BrowserTreeView.__init__(self, model)
        self.navigation = navigation
        self.query = None
        self.selection = SearchSelection(notebook)
        self.cancelled = False
        self.hasresults = False

        cell_renderer = Gtk.CellRendererText()
        for name, i in (
            (_('Page'), 0), # T: Column header search dialog
            (_('Score'), 1), # T: Column header search dialog
        ):
            column = Gtk.TreeViewColumn(name, cell_renderer, text=i)
            column.set_sort_column_id(i)
            if i == 0:
                column.set_expand(True)
            self.append_column(column)
        model.set_sort_column_id(1, Gtk.SortType.DESCENDING)
            # By default sort by score

        self.connect('row-activated', self._do_open_page)
        self.connect('destroy', self.__class__._cancel)

    def _cancel(self):
        # Checked by _search_callback; returning False there stops the search
        self.cancelled = True

    def search(self, query):
        query = query.strip()
        if not query:
            return
        logger.info('Searching for: %s', query)
        self.get_model().clear()

        self.cancelled = False
        self.hasresults = False
        self.query = Query(query)
        self.selection.search(self.query, callback=self._search_callback)
        self._update_results(self.selection)

    def _search_callback(self, results, path):
        # Returning False will cancel the search
        #~ print('!! CB', path)
        if results is not None:
            self._update_results(results)

        # Keep the UI responsive while the search runs
        while Gtk.events_pending():
            Gtk.main_iteration_do(False)

        return not self.cancelled

    def _update_results(self, results):
        model = self.get_model()
        if not model:
            return

        # Update score for paths that are already present
        order = []
        seen = set()
        i = -1
        for i, row in enumerate(model):
            path = row[self.PATH_COL]
            if path in results:
                score = results.scores.get(path, row[self.SCORE_COL])
            else:
                score = -1 # went missing !??? - technically a bug
            row[self.SCORE_COL] = score
            order.append((score, i))
            seen.add(path)

        # Add new paths
        new = results - seen
        for path in new:
            score = results.scores.get(path, 0)
            model.append((path.name, score, path))
            i += 1
            order.append((score, i))

        # re-order
        #order.sort() # sort on first item, which is score
        #model.reorder([x[1] for x in order]) # use second item

        self.hasresults = len(model) > 0

    def _do_open_page(self, view, path, col):
        page = Path(self.get_model()[path][0])
        pageview = self.navigation.open_page(page)

        # Popup find dialog with same query
        if pageview and self.query:
            find_string, find_needs_regex = self.query.find_input
            if find_string:
                flag = FIND_REGEX if find_needs_regex else 0
                pageview.show_find(find_string, flags=flag, highlight=True)
6,332 | test list array nbytes | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import numpy as np
import pytest # noqa: F401
import awkward as ak
def test():
    """from_numpy must report the same byte count as the source numpy buffer."""
    np_data = np.random.random(size=(4, 100 * 1024 * 1024 // 8 // 4))
    array = ak.operations.from_numpy(np_data, regulararray=False)
    assert np_data.nbytes == array.nbytes


def test_NumpyArray_nbytes():
    """NumpyArray wraps the buffer directly, so nbytes equals numpy's nbytes."""
    np_data = np.random.random(size=(4, 100 * 1024 * 1024 // 8 // 4))
    array = ak.contents.numpyarray.NumpyArray(np_data)
    assert array.nbytes == np_data.nbytes
def test_ByteMaskedArray_nbytes():
    """ByteMaskedArray nbytes = content buffers + one int8 mask byte per entry."""
    content = ak.operations.from_iter(
        [
            [[1.1, 0.0, 2.2], [], [3.3, 4.4]],
            [],
            [[5.5]],
            [[6.6, 9.9, 8.8, 7.7]],
            [[], [12.2, 11.1, 10.0]],
        ],
        highlevel=False,
    )
    mask = ak.index.Index8(np.array([0, 0, 1, 1, 0], dtype=np.int8))
    array = ak.contents.ByteMaskedArray(mask, content, valid_when=False)
    assert array.nbytes == 221


def test_BitMaskedArray_nbytes():
    """BitMaskedArray nbytes = content bytes + packed (1 bit per entry) mask bytes."""
    np_array = np.array(
        [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
    )
    np_index = np.array([1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1], dtype=np.uint8)
    array = ak.contents.bitmaskedarray.BitMaskedArray(
        ak.index.Index(np.packbits(np_index)),
        ak.contents.numpyarray.NumpyArray(np_array),
        valid_when=True,
        length=13,
        lsb_order=False,
    )
    assert np_array.nbytes == np_array.dtype.itemsize * len(np_array)
    assert np_index.nbytes == np_index.dtype.itemsize * len(np_index)
    # 13 bits pack into 2 bytes
    assert np.packbits(np_index).nbytes == 2
    assert array.nbytes == np_array.nbytes + np.packbits(np_index).nbytes

    # A second, independently constructed instance reports the same size
    array = ak.contents.bitmaskedarray.BitMaskedArray(
        ak.index.Index(np.packbits(np_index)),
        ak.contents.numpyarray.NumpyArray(
            np_array,
        ),
        valid_when=True,
        length=13,
        lsb_order=False,
    )
    assert array.nbytes == np_array.nbytes + np.packbits(np_index).nbytes
def test_EmptyArray_nbytes():
    """EmptyArray owns no buffers at all."""
    array = ak.contents.emptyarray.EmptyArray()
    assert array.nbytes == 0


def test_IndexedArray_nbytes():
    """IndexedArray nbytes = index buffer + content buffer."""
    np_index = np.array([2, 2, 0, 1, 4, 5, 4])
    np_content = np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    array = ak.contents.indexedarray.IndexedArray(
        ak.index.Index(np_index),
        ak.contents.numpyarray.NumpyArray(np_content),
    )
    assert array.nbytes == np_index.nbytes + np_content.nbytes


def test_IndexedOptionArray_nbytes():
    """IndexedOptionArray (negative index = missing) counts the same buffers."""
    np_index = np.array([2, 2, -1, 1, -1, 5, 4])
    np_content = np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    array = ak.contents.indexedoptionarray.IndexedOptionArray(
        ak.index.Index(np_index),
        ak.contents.numpyarray.NumpyArray(np_content),
    )
    assert array.nbytes == np_index.nbytes + np_content.nbytes
def test_ListArray_nbytes():
    """ListArray nbytes = starts + stops + content buffers.

    Name restored to the sibling ``test_*_nbytes`` pattern so pytest collects it.
    """
    np_starts = np.array([4, 100, 1])
    np_stops = np.array([7, 100, 3, 200])
    np_content = np.array([6.6, 4.4, 5.5, 7.7, 3.3, 2.2, 1.1, 8.8])
    array = ak.contents.listarray.ListArray(
        ak.index.Index(np_starts),
        ak.index.Index(np_stops),
        ak.contents.numpyarray.NumpyArray(np_content),
    )
    assert array.nbytes == np_starts.nbytes + np_stops.nbytes + np_content.nbytes


def test_ListOffsetArray_nbytes():
    """ListOffsetArray nbytes = offsets buffer + content buffer."""
    np_offsets = np.array([7, 10, 10, 200])
    np_content = np.array([6.6, 4.4, 5.5, 7.7, 3.3, 2.2, 1.1, 8.8])
    array = ak.contents.ListOffsetArray(
        ak.index.Index(np_offsets),
        ak.contents.numpyarray.NumpyArray(np_content),
    )
    assert array.nbytes == np_offsets.nbytes + np_content.nbytes
def test_RecordArray_nbytes():
    """RecordArray adds no buffers of its own; only field contents count."""
    np_content = np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    array = ak.contents.recordarray.RecordArray(
        [ak.contents.numpyarray.NumpyArray(np_content)],
        ["nest"],
    )
    assert array.nbytes == np_content.nbytes


def test_RegularArray_nbytes():
    """RegularArray stores only a size, no buffers; content alone counts."""
    np_content = np.array([0.0, 1.1, 2.2, 33.33, 4.4, 5.5, -6.6])
    array = ak.contents.regulararray.RegularArray(
        ak.contents.recordarray.RecordArray(
            [ak.contents.numpyarray.NumpyArray(np_content)],
            ["nest"],
        ),
        3,
    )
    assert array.nbytes == np_content.nbytes


def test_UnionArray_nbytes():
    """UnionArray nbytes = tags + index + every content buffer."""
    np_tags = np.array([1, 1, 0, 0, 1, 0, 1], dtype=np.int8)
    np_index = np.array([4, 3, 0, 1, 2, 2, 4, 100])
    np_content1 = np.array([1, 2, 3])
    np_content2 = np.array([[1.1], [2.2], [3.3], [4.4], [5.5]])
    array = ak.contents.unionarray.UnionArray(
        ak.index.Index(np_tags),
        ak.index.Index(np_index),
        [
            ak.contents.numpyarray.NumpyArray(np_content1),
            ak.contents.numpyarray.NumpyArray(np_content2),
        ],
    )
    assert (
        array.nbytes
        == np_tags.nbytes + np_index.nbytes + np_content1.nbytes + np_content2.nbytes
    )
def test_UnmaskedArray_nbytes():
    """UnmaskedArray is a zero-cost wrapper; only the content counts."""
    np_content = np.array([0.0, 2.2, 1.1, 3.3], dtype=np.float64)
    array = ak.contents.unmaskedarray.UnmaskedArray(
        ak.contents.numpyarray.NumpyArray(np_content)
    )
    assert array.nbytes == np_content.nbytes


def test_highlevel():
    """The high-level Array forwards nbytes to its layout."""
    ak_Array = ak.highlevel.Array
    array = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4])
    assert array.nbytes == array.layout.nbytes
6,333 | setup | """
ShadowPlacer.py places a shadow.
It traces a line from a light source to the opposing surface.
Or it may do that later, right now it puts a node on the surface under
its parent node.
"""
__all__ = ['ShadowPlacer']
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.directnotify import DirectNotifyGlobal
from panda3d.core import (
BitMask32,
CollisionHandlerFloor,
CollisionNode,
CollisionRay,
CollisionTraverser,
NodePath,
)
from . import DirectObject
class ShadowPlacer(DirectObject.DirectObject):
    """Keeps a shadow node glued to the floor under its parent node.

    Casts a collision ray downward and lets a CollisionHandlerFloor
    adjust the shadow node's z position.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory("ShadowPlacer")

    if __debug__:
        # Class-wide instance/active counters, shown by debugDisplay()
        count = 0
        activeCount = 0

    # special methods
    def __init__(self, cTrav, shadowNodePath,
            wallCollideMask, floorCollideMask):
        self.isActive = 0 # Is the placer "on". This is also printed in the debugCall.
        assert self.notify.debugCall()
        DirectObject.DirectObject.__init__(self)
        self.setup(cTrav, shadowNodePath,
            wallCollideMask, floorCollideMask)
        if __debug__:
            self.count += 1
            self.debugDisplay()

    def setup(self, cTrav, shadowNodePath,
            wallCollideMask, floorCollideMask):
        """
        Set up the collisions
        """
        assert self.notify.debugCall()
        assert not shadowNodePath.isEmpty()
        assert not hasattr(self, "cTrav") # Protect from setup() being called again.
        if not cTrav:
            # set up the shadow collision traverser
            base.initShadowTrav()
            cTrav = base.shadowTrav
        self.cTrav = cTrav
        self.shadowNodePath = shadowNodePath
        floorOffset = 0.025

        # Set up the collison ray
        # This is a ray cast down to detect floor polygons
        self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
        cRayNode = CollisionNode('shadowPlacer')
        cRayNode.addSolid(self.cRay)
        self.cRayNodePath = NodePath(cRayNode)
        self.cRayBitMask = floorCollideMask
        cRayNode.setFromCollideMask(self.cRayBitMask)
        cRayNode.setIntoCollideMask(BitMask32.allOff())

        # set up floor collision mechanism
        self.lifter = CollisionHandlerFloor()
        #self.lifter.setInPattern("on-floor")
        #self.lifter.setOutPattern("off-floor")
        self.lifter.setOffset(floorOffset)
        self.lifter.setReach(4.0)

        # activate the collider with the traverser and pusher
        #self.on()
        self.lifter.addCollider(self.cRayNodePath, shadowNodePath)

    def delete(self):
        assert self.notify.debugCall()
        self.off()
        if __debug__:
            assert not self.isActive
            self.count -= 1
            self.debugDisplay()
        del self.cTrav
        del self.shadowNodePath
        del self.cRay
        #del self.cRayNode
        self.cRayNodePath.removeNode()
        del self.cRayNodePath
        del self.lifter

    def on(self):
        """
        Turn on the shadow placement. The shadow z position will
        start being updated until a call to off() is made.
        """
        assert self.notify.debugCall("activeCount=%s"%(self.activeCount,))
        if self.isActive:
            assert self.cTrav.hasCollider(self.cRayNodePath)
            return
        assert not self.cTrav.hasCollider(self.cRayNodePath)
        self.cRayNodePath.reparentTo(self.shadowNodePath.getParent())
        self.cTrav.addCollider(self.cRayNodePath, self.lifter)
        self.isActive = 1
        if __debug__:
            self.activeCount += 1
            self.debugDisplay()

    def off(self):
        """
        Turn off the shadow placement. The shadow will still be
        there, but the z position will not be updated until a call
        to on() is made.
        """
        assert self.notify.debugCall("activeCount=%s"%(self.activeCount,))
        if not self.isActive:
            assert not self.cTrav.hasCollider(self.cRayNodePath)
            return
        assert self.cTrav.hasCollider(self.cRayNodePath)
        didIt = self.cTrav.removeCollider(self.cRayNodePath)
        assert didIt
        # Now that we have disabled collisions, make one more pass
        # right now to ensure we aren't standing in a wall.
        self.oneTimeCollide()
        self.cRayNodePath.detachNode()
        self.isActive = 0
        if __debug__:
            self.activeCount -= 1
            self.debugDisplay()

    def oneTimeCollide(self):
        """
        Makes one quick collision pass for the avatar, for instance as
        a one-time straighten-things-up operation after collisions
        have been disabled.
        """
        assert self.notify.debugCall()
        tempCTrav = CollisionTraverser("oneTimeCollide")
        tempCTrav.addCollider(self.cRayNodePath, self.lifter)
        tempCTrav.traverse(render)

    def resetToOrigin(self):
        if self.shadowNodePath:
            self.shadowNodePath.setPos(0, 0, 0)

    if __debug__:
        def debugDisplay(self):
            """for debugging"""
            if self.notify.getDebug():
                message = "%d active (%d total), %d colliders"%(
                    self.activeCount, self.count, self.cTrav.getNumColliders())
                self.notify.debug(message)
                onScreenDebug.add("ShadowPlacers", message)
            return 1 # to allow assert self.debugDisplay()
6,334 | clear | import numpy as np
from PyQt5.QtCore import QRectF, QLineF, QPointF, Qt
from PyQt5.QtGui import QPainter, QFont, QFontMetrics, QPen, QTransform, QBrush
from urh import settings
from urh.ui.painting.ZoomableScene import ZoomableScene
from urh.util import util
from urh.util.Formatter import Formatter
class GridScene(ZoomableScene):
    """Zoomable scene that draws a frequency grid and an optional tune marker."""

    def __init__(self, parent=None):
        self.draw_grid = False
        self.font_metrics = QFontMetrics(QFont())
        self.center_freq = 433.92e6
        self.frequencies = []
        # [line item, text item] pair, or None when no marker is shown
        self.frequency_marker = None
        super().__init__(parent)
        self.setSceneRect(0, 0, 10, 10)

    def drawBackground(self, painter: QPainter, rect: QRectF):
        if self.draw_grid and len(self.frequencies) > 0:
            painter.setPen(QPen(painter.pen().color(), 0))
            parent_width = self.parent().width() if hasattr(self.parent(), "width") else 750
            view_rect = self.parent().view_rect() if hasattr(self.parent(), "view_rect") else rect

            font_width = self.font_metrics.width(Formatter.big_value_with_suffix(self.center_freq) + " ")
            x_grid_size = int(view_rect.width() / parent_width * font_width)
            # x_grid_size = int(0.1 * view_rect.width()) if 0.1 * view_rect.width() > 1 else 1
            y_grid_size = 1

            # Index of the DC (0 Hz offset) bin; grid lines fan out from here
            x_mid = np.where(self.frequencies == 0)[0]
            x_mid = int(x_mid[0]) if len(x_mid) > 0 else 0

            left = int(rect.left()) - (int(rect.left()) % x_grid_size)
            left = left if left > 0 else 0

            top = rect.top() - (rect.top() % y_grid_size)
            bottom = rect.bottom() - (rect.bottom() % y_grid_size)
            right_border = int(rect.right()) if rect.right() < len(self.frequencies) else len(self.frequencies)

            scale_x, scale_y = util.calc_x_y_scale(rect, self.parent())
            fh = self.font_metrics.height()

            x_range = list(range(x_mid, left, -x_grid_size)) + list(range(x_mid, right_border, x_grid_size))
            lines = [QLineF(x, rect.top(), x, bottom - fh * scale_y) for x in x_range] \
                    + [QLineF(rect.left(), y, rect.right(), y) for y in np.arange(top, bottom, y_grid_size)]

            pen = painter.pen()
            pen.setStyle(Qt.DotLine)
            painter.setPen(pen)
            painter.drawLines(lines)

            painter.scale(scale_x, scale_y)

            counter = -1 # Counter for Label for every second line
            for x in x_range:
                freq = self.frequencies[x]
                counter += 1
                if freq == 0:
                    counter = 0
                if freq != 0 and (counter % 2 != 0): # Label for every second line
                    continue
                value = Formatter.big_value_with_suffix(self.center_freq + freq, 2)
                font_width = self.font_metrics.width(value)
                painter.drawText(QPointF(x / scale_x - font_width / 2, bottom / scale_y), value)

    def draw_frequency_marker(self, x_pos, frequency):
        if frequency is None:
            self.clear_frequency_marker()
            return

        y1 = self.sceneRect().y()
        y2 = self.sceneRect().y() + self.sceneRect().height()
        if self.frequency_marker is None:
            # Create the marker (vertical line + label) lazily on first use
            pen = QPen(settings.LINECOLOR, 0)
            self.frequency_marker = [None, None]
            self.frequency_marker[0] = self.addLine(x_pos, y1, x_pos, y2, pen)
            self.frequency_marker[1] = self.addSimpleText("")
            self.frequency_marker[1].setBrush(QBrush(settings.LINECOLOR))
            font = QFont()
            font.setBold(True)
            font.setPointSizeF(font.pointSizeF() * 1.25 + 1)
            self.frequency_marker[1].setFont(font)

        self.frequency_marker[0].setLine(x_pos, y1, x_pos, y2)
        scale_x, scale_y = util.calc_x_y_scale(self.sceneRect(), self.parent())
        self.frequency_marker[1].setTransform(QTransform.fromScale(scale_x, scale_y), False)
        self.frequency_marker[1].setText("Tune to " + Formatter.big_value_with_suffix(frequency, decimals=3))
        font_metric = QFontMetrics(self.frequency_marker[1].font())
        text_width = font_metric.width("Tune to") * scale_x
        text_width += (font_metric.width(" ") * scale_x) / 2
        self.frequency_marker[1].setPos(x_pos - text_width, 0.95 * y1)

    def clear_frequency_marker(self):
        if self.frequency_marker is not None:
            self.removeItem(self.frequency_marker[0])
            self.removeItem(self.frequency_marker[1])
        self.frequency_marker = None

    def get_freq_for_pos(self, x: int) -> float:
        # Returns None when x is outside the known frequency bins
        try:
            f = self.frequencies[x]
        except IndexError:
            return None
        return self.center_freq + f

    def clear(self):
        # Drop the marker before the base class removes all items,
        # so we do not keep dangling references to deleted items.
        self.clear_frequency_marker()
        super().clear()
6,335 | get begidx | import os.path
import sys
from warnings import warn
try:
_console = sys._jy_console
_reader = _console.reader
except AttributeError:
raise ImportError("Cannot access JLine2 setup")
try:
# jarjar-ed version
from org.python.jline.console.history import MemoryHistory
except ImportError:
# dev version from extlibs
from jline.console.history import MemoryHistory
# Public readline-compatible API surface
__all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer',
           'get_completer_delims', 'get_current_history_length',
           'get_endidx', 'get_history_item', 'get_history_length',
           'get_line_buffer', 'insert_text', 'parse_and_bind',
           'read_history_file', 'read_init_file', 'redisplay',
           'remove_history_item', 'set_completer', 'set_completer_delims',
           'set_history_length', 'set_pre_input_hook', 'set_startup_hook',
           'write_history_file']

_history_list = None

# The need for the following warnings should go away once we update
# JLine. Choosing ImportWarning as the closest warning to what is
# going on here, namely this is functionality not yet available on
# Jython.

class NotImplementedWarning(ImportWarning):
    """Not yet implemented by Jython"""


class SecurityWarning(ImportWarning):
    """Security manager prevents access to private field"""
def parse_and_bind(string):
    # Key bindings are handled by JLine itself; accepted only for API compatibility.
    pass


def get_line_buffer():
    return str(_reader.cursorBuffer.buffer)


def insert_text(string):
    _reader.putString(string)


def read_init_file(filename=None):
    warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2)


def read_history_file(filename="~/.history"):
    expanded = os.path.expanduser(filename)
    with open(expanded) as f:
        _reader.history.load(f)


def write_history_file(filename="~/.history"):
    expanded = os.path.expanduser(filename)
    with open(expanded, 'w') as f:
        for line in _reader.history.entries():
            f.write(line.value().encode("utf-8"))
            f.write("\n")


def clear_history():
    _reader.history.clear()


def add_history(line):
    _reader.history.add(line)


def get_history_length():
    return _reader.history.maxSize


def set_history_length(length):
    _reader.history.maxSize = length


def get_current_history_length():
    return _reader.history.size()


def get_history_item(index):
    # JLine indexes from 0 while readline indexes from 1 (at least in test_readline)
    if index > 0:
        return _reader.history.get(index - 1)
    else:
        return None


def remove_history_item(pos):
    _reader.history.remove(pos)


def replace_history_item(pos, line):
    _reader.history.set(pos, line)


def redisplay():
    _reader.redrawLine()


def set_startup_hook(function=None):
    _console.startupHook = function


def set_pre_input_hook(function=None):
    warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2)
_completer_function = None

def set_completer(function=None):
    """set_completer([function]) -> None

    Set or remove the completer function.
    The function is called as function(text, state),
    for state in 0, 1, 2, ..., until it returns a non-string.
    It should return the next possible completion starting with 'text'."""

    global _completer_function
    _completer_function = function

    def complete_handler(buffer, cursor, candidates):
        start = _get_delimited(buffer, cursor)[0]
        delimited = buffer[start:cursor]
        try:
            sys.ps2
            have_ps2 = True
        except AttributeError:
            have_ps2 = False
        if (have_ps2 and _reader.prompt == sys.ps2) and (not delimited or delimited.isspace()):
            # Insert tab (as expanded to 4 spaces), but only if
            # preceding is whitespace/empty and in console
            # continuation; this is a planned featue for Python 3 per
            # http://bugs.python.org/issue5845
            #
            # Ideally this would not expand tabs, in case of mixed
            # copy&paste of tab-indented code, however JLine2 gets
            # confused as to the cursor position if certain, but not
            # all, subsequent editing if the tab is backspaced
            candidates.add(" " * 4)
            return start

        # TODO: if there are a reasonably large number of completions
        # (need to get specific numbers), CPython 3.4 will show a
        # message like so:
        # >>>
        # Display all 186 possibilities? (y or n)
        # Currently Jython arbitrarily limits this to 100 and displays them
        for state in xrange(100):
            completion = None
            try:
                completion = function(delimited, state)
            except:
                pass
            if completion:
                candidates.add(completion)
            else:
                break
        return start

    _reader.addCompleter(complete_handler)


def get_completer():
    """Return the completer function set by set_completer(), or None."""
    return _completer_function
def _get_delimited(buffer, cursor):
    """Return (start, end) of the delimiter-bounded word ending at *cursor*."""
    start = cursor
    for i in xrange(cursor - 1, -1, -1):
        if buffer[i] in _completer_delims:
            break
        start = i
    return start, cursor


def get_begidx():
    # Name restored to match the entry declared in __all__
    return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0]


def get_endidx():
    return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1]
def set_completer_delims(string):
    """Set the characters that delimit words for tab-completion."""
    global _completer_delims, _completer_delims_set
    _completer_delims = string
    _completer_delims_set = set(string)


def get_completer_delims():
    """Return the string of word-delimiter characters."""
    return _completer_delims


# Default delimiters, matching CPython's readline defaults
set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
6,336 | read log file | #!/usr/bin/env python3
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Log Parser for Dictionary-based Logging
This uses the JSON database file to decode the input binary
log data and print the log messages.
"""
import argparse
import binascii
import logging
import sys
import dictionary_parser
from dictionary_parser.log_database import LogDatabase
LOGGER_FORMAT = "%(message)s"
logger = logging.getLogger("parser")
LOG_HEX_SEP = "##ZLOGV1##"
def parse_args():
    """Parse command line arguments"""
    argparser = argparse.ArgumentParser(allow_abbrev=False)

    argparser.add_argument("dbfile", help="Dictionary Logging Database file")
    argparser.add_argument("logfile", help="Log Data file")
    argparser.add_argument("--hex", action="store_true",
                           help="Log Data file is in hexadecimal strings")
    argparser.add_argument("--rawhex", action="store_true",
                           help="Log file only contains hexadecimal log data")
    argparser.add_argument("--debug", action="store_true",
                           help="Print extra debugging information")

    return argparser.parse_args()
def METHOD_NAME(args):
    """
    Read the log from file

    Returns the raw binary log data (bytes), decoding from hex text
    when ``args.hex`` is set.
    """
    logdata = None

    # Open log data file for reading
    if args.hex:
        if args.rawhex:
            # Simply log file with only hexadecimal data
            logdata = dictionary_parser.utils.convert_hex_file_to_bin(args.logfile)
        else:
            hexdata = ''

            with open(args.logfile, "r", encoding="iso-8859-1") as hexfile:
                for line in hexfile.readlines():
                    hexdata += line.strip()

            if LOG_HEX_SEP not in hexdata:
                logger.error("ERROR: Cannot find start of log data, exiting...")
                sys.exit(1)

            idx = hexdata.index(LOG_HEX_SEP) + len(LOG_HEX_SEP)
            hexdata = hexdata[idx:]

            if len(hexdata) % 2 != 0:
                # Make sure there are even number of characters
                idx = int(len(hexdata) / 2) * 2
                hexdata = hexdata[:idx]

            idx = 0
            while idx < len(hexdata):
                # When running QEMU via west or ninja, there may be additional
                # strings printed by QEMU, west or ninja (for example, QEMU
                # is terminated, or user interrupted, etc). So we need to
                # figure out where the end of log data stream by
                # trying to convert from hex to bin.
                idx += 2

                try:
                    binascii.unhexlify(hexdata[:idx])
                except binascii.Error:
                    idx -= 2
                    break

            logdata = binascii.unhexlify(hexdata[:idx])
    else:
        # open() raises OSError on failure (it never returns a falsy object),
        # so no explicit None check is needed; the context manager also
        # guarantees the file is closed.
        with open(args.logfile, "rb") as logfile:
            logdata = logfile.read()

    return logdata
def main():
    """Main function of log parser"""
    args = parse_args()

    # Setup logging for parser
    logging.basicConfig(format=LOGGER_FORMAT)
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # Read from database file
    database = LogDatabase.read_json_database(args.dbfile)
    if database is None:
        logger.error("ERROR: Cannot open database file: %s, exiting...", args.dbfile)
        sys.exit(1)

    logdata = METHOD_NAME(args)
    if logdata is None:
        logger.error("ERROR: cannot read log from file: %s, exiting...", args.logfile)
        sys.exit(1)

    log_parser = dictionary_parser.get_parser(database)
    if log_parser is not None:
        logger.debug("# Build ID: %s", database.get_build_id())
        logger.debug("# Target: %s, %d-bit", database.get_arch(), database.get_tgt_bits())
        if database.is_tgt_little_endian():
            logger.debug("# Endianness: Little")
        else:
            logger.debug("# Endianness: Big")

        ret = log_parser.parse_log_data(logdata, debug=args.debug)
        if not ret:
            logger.error("ERROR: there were error(s) parsing log data")
            sys.exit(1)
    else:
        logger.error("ERROR: Cannot find a suitable parser matching database version!")
        sys.exit(1)


if __name__ == "__main__":
    main()
6,337 | backward shape | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Data layout."""
from typing import Union
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("tir.Layout")
class Layout(Object):
    """Layout is composed of upper cases, lower cases and numbers,
    where upper case indicates a primal axis and
    the corresponding lower case with factor size indicates the subordinate axis.
    For example, NCHW16c can describe a 5-D tensor of
    [batch_size, channel, height, width, channel_block].
    Here subordinate axis channel_block=16 is the factor size of the primal axis C (channel).

    See Also
    --------
    layout : Declare a layout
    """

    def __len__(self):
        # Number of axes in the layout
        return _ffi_api.LayoutNdim(self)  # type: ignore

    def __contains__(self, axis):
        # Only single-letter axis names can be members
        return len(axis) == 1 and axis[0].isalpha() and axis[0] in self.name

    def __getitem__(self, index):
        if index >= len(self):
            raise IndexError("Layout index out of range")
        return _ffi_api.LayoutGetItem(self, index)  # type: ignore

    def index_of(self, axis):
        """Get the index of an axis

        Parameters
        ----------
        axis : str
            The axis name, need to be [a-z,A-Z]

        Returns
        -------
        index : int
            The index of the axis, -1 if not found.
        """
        return _ffi_api.LayoutIndexOf(self, axis)  # type: ignore

    def factor_of(self, axis):
        """Get the factor size of the subordinate axis.

        Parameters
        ----------
        axis : str
            The axis name, need to be [a-z,A-Z]

        Returns
        -------
        factor : int
            the size of the subordinate-axis of axis (if axis is a primal-axis),
            or the size of axis itself (if axis is a subordinate-axis).
            Return -1 if axis is not in the layout.
        """
        return _ffi_api.LayoutFactorOf(self, axis)  # type: ignore
@tvm._ffi.register_object("tir.BijectiveLayout")
class BijectiveLayout(Object):
    """Bijective mapping for two layouts (src-layout and dst-layout).
    It provides shape and index conversion between each other.

    Do not construct directly, use :any:`bijective_layout` instead.
    See the documentation of :any:`bijective_layout` for more details.

    Parameters
    ----------
    src_layout : str or Layout
        source layout.

    dst_layout : str or Layout
        destination layout.

    See Also
    --------
    bijective_layout : Declare a layout
    """

    def forward_index(self, index):
        """Given the indices of the src-layout, infer the dst index.

        Parameters
        ----------
        index: Array of Expr
            The indices in src-layout.

        Returns
        -------
        dst_index: Array of Expr
            The inferred indices in dst-layout.
        """
        return _ffi_api.BijectiveLayoutForwardIndex(self, index)  # type: ignore

    def backward_index(self, index):
        """Given the indices of the dst-layout, infer the src index.

        Parameters
        ----------
        index: Array of Expr
            The indices in dst-layout.

        Returns
        -------
        src_index: Array of Expr
            The inferred indices in src-layout.
        """
        return _ffi_api.BijectiveLayoutBackwardIndex(self, index)  # type: ignore

    def forward_shape(self, shape):
        """Given the shape of the src-layout, infer the dst shape.

        Parameters
        ----------
        shape: Array of Expr
            The shape in src-layout.

        Returns
        -------
        dst_shape: Array of Expr
            The inferred shape in dst-layout.
        """
        return _ffi_api.BijectiveLayoutForwardShape(self, shape)  # type: ignore

    def backward_shape(self, shape):
        """Given the shape of the dst-layout, infer the src shape.

        Parameters
        ----------
        shape: Array of Expr
            The shape in dst-layout.

        Returns
        -------
        src_shape: Array of Expr
            The inferred shape in src-layout.
        """
        return _ffi_api.BijectiveLayoutBackwardShape(self, shape)  # type: ignore
def layout(layout_str: str, dtype: str = "int32") -> Layout:
    """Create a layout node from a string.

    Parameters
    ----------
    layout_str : str
        A layout representation is composed of upper cases, lower cases and numbers,
        where upper case indicates a primal axis and
        the corresponding lower case with factor size indicates the subordinate axis.
        For example, NCHW16c can describe a 5-D tensor of
        [batch_size, channel, height, width, channel_block].
        Here subordinate axis channel_block=16 is the factor size of
        the primal axis C (channel).

    dtype : str
        The dtype of generated axes vars in the returned layout.
        It is required to be integer type.

    Returns
    -------
    layout : Layout
        The created layout
    """
    # Parsing and validation happen entirely on the C++ side via the FFI.
    return _ffi_api.Layout(layout_str, dtype)  # type: ignore
def bijective_layout(
    src_layout: Union[str, Layout], dst_layout: Union[str, Layout]
) -> BijectiveLayout:
    """Create a bijective layout mapping.

    Parameters
    ----------
    src_layout : str or Layout
        source layout.

    dst_layout : str or Layout
        destination layout.

    Returns
    -------
    bijective_layout : BijectiveLayout
        The created bijective layout
    """

    def _as_layout(candidate):
        # Promote a plain string spec to a Layout node; pass Layout through.
        return layout(candidate) if isinstance(candidate, str) else candidate

    return _ffi_api.BijectiveLayout(_as_layout(src_layout), _as_layout(dst_layout))  # type: ignore
6,338 | source | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import cross_building
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
from conan.tools.files import copy, get, rm, rmdir
from conan.tools.gnu import Autotools, AutotoolsDeps, AutotoolsToolchain, PkgConfigDeps
from conan.tools.layout import basic_layout
import os
required_conan_version = ">=1.53.0"
class PulseAudioConan(ConanFile):
    """Conan recipe for PulseAudio, built with autotools on Linux only."""

    name = "pulseaudio"
    description = "PulseAudio is a sound system for POSIX OSes, meaning that it is a proxy for sound applications."
    topics = ("sound",)
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "http://pulseaudio.org/"
    license = "LGPL-2.1"
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    # Build-time feature toggles; defaults below mirror a minimal desktop build.
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_alsa": [True, False],
        "with_glib": [True, False],
        "with_fftw": [True, False],
        "with_x11": [True, False],
        "with_openssl": [True, False],
        "with_dbus": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_alsa": True,
        "with_glib": False,
        "with_fftw": False,
        "with_x11": True,
        "with_openssl": True,
        "with_dbus": False,
    }

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")
        # Pure C project: C++ settings are irrelevant.
        self.settings.rm_safe("compiler.libcxx")
        self.settings.rm_safe("compiler.cppstd")
        # fftw is only used by the dbus-dependent equalizer module, so the
        # option only exists when dbus is enabled (see also get_safe() uses).
        if not self.options.with_dbus:
            del self.options.with_fftw

    def layout(self):
        basic_layout(self, src_folder="src")

    def requirements(self):
        self.requires("libiconv/1.17")
        self.requires("libsndfile/1.2.2")
        self.requires("libcap/2.68")
        self.requires("libtool/2.4.7")
        if self.options.with_alsa:
            self.requires("libalsa/1.2.7.2")
        if self.options.with_glib:
            self.requires("glib/2.77.2")
        if self.options.get_safe("with_fftw"):
            self.requires("fftw/3.3.10")
        if self.options.with_x11:
            self.requires("xorg/system")
        if self.options.with_openssl:
            self.requires("openssl/[>=1.1 <4]")
        if self.options.with_dbus:
            self.requires("dbus/1.15.8")

    def validate(self):
        if self.settings.os != "Linux":
            raise ConanInvalidConfiguration("pulseaudio supports only linux currently")
        # PulseAudio links against the single-precision fftw library only.
        if self.options.get_safe("with_fftw"):
            fftw_precision = self.dependencies["fftw"].options.precision
            if fftw_precision != "single":
                raise ConanInvalidConfiguration(
                    f"Pulse audio cannot use fftw {fftw_precision} precision. "
                    "Either set option fftw:precision=single or pulseaudio:with_fftw=False"
                )

    def build_requirements(self):
        self.tool_requires("gettext/0.21")
        self.tool_requires("libtool/2.4.7")
        # Provide pkg-config unless the user already configured one.
        if not self.conf.get("tools.gnu:pkg_config", check_type=str):
            self.tool_requires("pkgconf/1.9.5")

    def METHOD_NAME(self):
        # Conan source step: fetch and unpack the upstream tarball.
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        env = VirtualBuildEnv(self)
        env.generate()
        if not cross_building(self):
            env = VirtualRunEnv(self)
            env.generate(scope="build")
        tc = AutotoolsToolchain(self)
        yes_no = lambda v: "yes" if v else "no"
        tc.configure_args.extend([
            f"--enable-shared={yes_no(self.options.shared)}",
            f"--enable-static={yes_no(not self.options.shared)}",
            f"--enable-glib2={yes_no(self.options.with_glib)}",
            f"--with-fftw={yes_no(self.options.get_safe('with_fftw'))}",
            "--with-udev-rules-dir=${prefix}/bin/udev/rules.d",
            # Point systemd unit output at a throwaway dir so nothing escapes
            # the package folder.
            f"--with-systemduserunitdir={os.path.join(self.build_folder, 'ignore')}",
        ])
        for lib in ["alsa", "x11", "openssl", "dbus"]:
            tc.configure_args.append(f"--enable-{lib}={yes_no(getattr(self.options, f'with_{lib}'))}")
        # TODO: to remove when automatically handled by AutotoolsToolchain
        tc.configure_args.append("--libexecdir=${prefix}/bin")
        tc.generate()
        deps = AutotoolsDeps(self)
        deps.generate()
        pkg = PkgConfigDeps(self)
        pkg.generate()

    def build(self):
        autotools = Autotools(self)
        autotools.configure()
        autotools.make()

    def package(self):
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        autotools = Autotools(self)
        autotools.install()
        # Strip files that must not ship in a conan package (configs, docs,
        # build-system metadata, libtool archives).
        rmdir(self, os.path.join(self.package_folder, "etc"))
        rmdir(self, os.path.join(self.package_folder, "share"))
        rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
        rm(self, "*.la", os.path.join(self.package_folder, "lib"), recursive=True)

    def package_info(self):
        self.cpp_info.components["pulse"].set_property("pkg_config_name", "libpulse")
        self.cpp_info.components["pulse"].libs = ["pulse", f"pulsecommon-{self.version}"]
        # pulsecommon lives in a versioned subdirectory.
        self.cpp_info.components["pulse"].libdirs.append(os.path.join("lib", "pulseaudio"))
        self.cpp_info.components["pulse"].requires = ["libiconv::libiconv", "libsndfile::libsndfile", "libcap::libcap", "libtool::libtool"]
        if self.options.with_alsa:
            self.cpp_info.components["pulse"].requires.append("libalsa::libalsa")
        if self.options.get_safe("with_fftw"):
            self.cpp_info.components["pulse"].requires.append("fftw::fftw")
        if self.options.with_x11:
            self.cpp_info.components["pulse"].requires.append("xorg::xorg")
        if self.options.with_openssl:
            self.cpp_info.components["pulse"].requires.append("openssl::openssl")
        if self.options.with_dbus:
            self.cpp_info.components["pulse"].requires.append("dbus::dbus")
        self.cpp_info.components["pulse-simple"].set_property("pkg_config_name", "libpulse-simple")
        self.cpp_info.components["pulse-simple"].libs = ["pulse-simple"]
        self.cpp_info.components["pulse-simple"].defines.append("_REENTRANT")
        self.cpp_info.components["pulse-simple"].requires = ["pulse"]
        if self.options.with_glib:
            self.cpp_info.components["pulse-mainloop-glib"].set_property("pkg_config_name", "libpulse-mainloop-glib")
            self.cpp_info.components["pulse-mainloop-glib"].libs = ["pulse-mainloop-glib"]
            self.cpp_info.components["pulse-mainloop-glib"].defines.append("_REENTRANT")
            self.cpp_info.components["pulse-mainloop-glib"].requires = ["pulse", "glib::glib-2.0"]
        # FIXME: add cmake generators when conan can generate PULSEAUDIO_INCLUDE_DIR PULSEAUDIO_LIBRARY vars
6,339 | reset parameters | from typing import Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import FloatTensor
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
class Discriminator(nn.Module):
    """Bilinear discriminator scoring graph embeddings against positive and
    negative augmentations of a source graph.

    Args:
        nf: Dimensionality (along the feature axis) of the input array
    """

    def __init__(self, nf: int):
        super(Discriminator, self).__init__()
        self.f_k = nn.Bilinear(nf, nf, 1)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        # Xavier-uniform weights and zero bias for every Bilinear submodule;
        # other module types are left untouched.
        if not isinstance(m, nn.Bilinear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)

    def forward(self, g_repr: FloatTensor, g_pos: FloatTensor, g_neg: FloatTensor):
        """Score the positive and negative pairings against the source graph.

        Args:
            g_repr: Representation of source graph, with aggregated neighborhood
                representations
            g_pos: Positive-pair augmentation of the source graph
            g_neg: Negative-pair augmentation of the source graph

        Returns:
            logits: Similarity scores, positive pair in column 0 and negative
                pair in column 1.
        """
        context = g_repr.expand_as(g_pos)
        pos_score = self.f_k(g_pos, context)
        neg_score = self.f_k(g_neg, context)
        return torch.cat((pos_score, neg_score), 1)
class AvgReadout(nn.Module):
    """Masked mean-pooling readout.

    Averages the embedding rows selected by each mask row, then L2-normalizes
    the result, yielding one global representation per mask row.
    """

    def __init__(self):
        super(AvgReadout, self).__init__()

    def forward(self, emb: FloatTensor, mask: FloatTensor):
        """
        Args:
            emb: Graph embedding, shape (nodes, features) — inferred from the
                matmul below; confirm against callers.
            mask: Row-wise selector weights, shape (rows, nodes).
        """
        pooled = torch.mm(mask, emb)
        # Per-row divisor = number (or total weight) of selected elements,
        # broadcast across the feature axis.
        divisor = torch.sum(mask, 1)
        divisor = divisor.expand((pooled.shape[1], divisor.shape[0])).T
        return F.normalize(pooled / divisor, p=2, dim=1)
class Encoder(Module):
    """Representation learning for spatial transcriptomics data

    Args:
        in_features: Number of features in the dataset
        out_features: Size of the desired encoding
        graph_neigh: Pairwise adjacency matrix indicating which spots are neighbors of which other spots
        dropout: Proportion of weights in each layer to set to 0
        act: object of class `torch.nn.functional`, default `F.relu`. Activation function for each encoder layer
        clip: Threshold below which imputed feature values will be set to 0, as a percentile of the max value
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        graph_neigh: FloatTensor,
        dropout: float = 0.0,
        act=F.relu,
        clip: Union[None, float] = None,
    ):
        super(Encoder, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.graph_neigh = graph_neigh
        self.dropout = dropout
        self.act = act
        self.clip = clip
        # Encoder (in -> out) and decoder (out -> in) weight matrices;
        # allocated uninitialized, then filled by the init method below.
        self.weight1 = Parameter(torch.FloatTensor(self.in_features, self.out_features))
        self.weight2 = Parameter(torch.FloatTensor(self.out_features, self.in_features))
        self.METHOD_NAME()
        self.disc = Discriminator(self.out_features)
        self.sigm = nn.Sigmoid()
        self.read = AvgReadout()

    def METHOD_NAME(self):
        # Xavier-uniform (re)initialization of both weight matrices.
        torch.nn.init.xavier_uniform_(self.weight1)
        torch.nn.init.xavier_uniform_(self.weight2)

    def forward(self, feat: FloatTensor, feat_a: FloatTensor, adj: FloatTensor):
        """
        Args:
            feat: Counts matrix
            feat_a: Counts matrix following permutation and augmentation
            adj: Pairwise distance matrix

        Returns:
            Tuple of (hidden_emb, h, ret, ret_a): latent embedding,
            reconstructed counts, and the two discriminator logit sets.
        """
        # Encode: dropout -> linear -> neighborhood aggregation via adj.
        z = F.dropout(feat, self.dropout, self.training)
        z = torch.mm(z, self.weight1)
        z = torch.mm(adj, z)
        hidden_emb = z
        # Decode back to feature space (reconstruction).
        h = torch.mm(z, self.weight2)
        h = torch.mm(adj, h)
        # Clipping constraint: zero out values below the `clip` quantile
        # (computed per feature, dim=0). Mutates h in place.
        if self.clip is not None:
            thresh = torch.quantile(h, self.clip, dim=0)
            mask = h < thresh
            h[mask] = 0
        # Non-negativity constraint:
        nz_mask = h < 0
        h[nz_mask] = 0
        emb = self.act(z)
        # Adversarial learning: encode the augmented counts with the same
        # weights, pool both embeddings over graph neighborhoods, and score
        # real-vs-augmented pairings in both directions.
        z_a = F.dropout(feat_a, self.dropout, self.training)
        z_a = torch.mm(z_a, self.weight1)
        z_a = torch.mm(adj, z_a)
        emb_a = self.act(z_a)
        g = self.read(emb, self.graph_neigh)
        g = self.sigm(g)
        g_a = self.read(emb_a, self.graph_neigh)
        g_a = self.sigm(g_a)
        ret = self.disc(g, emb, emb_a)
        ret_a = self.disc(g_a, emb_a, emb)
        return hidden_emb, h, ret, ret_a
6,340 | test failover to second master | import logging
import os
import shutil
import time
import pytest
# Marks applied to every test in this module.
pytestmark = [
    pytest.mark.core_test,
    pytest.mark.skip_on_freebsd(reason="Processes are not properly killed on FreeBSD"),
]

log = logging.getLogger(__name__)
def test_pki(salt_mm_failover_master_1, salt_mm_failover_master_2, caplog):
    """
    Verify https://docs.saltproject.io/en/latest/topics/tutorials/multimaster_pki.html

    Starts a failover minion configured to verify the master's signed public
    key, and asserts the verification success message appears in its logs.
    """
    # At first we spin up a simple minion in order to capture its logging output.
    config_defaults = {
        "transport": salt_mm_failover_master_1.config["transport"],
    }
    mm_master_1_port = salt_mm_failover_master_1.config["ret_port"]
    mm_master_1_addr = salt_mm_failover_master_1.config["interface"]
    mm_master_2_port = salt_mm_failover_master_2.config["ret_port"]
    mm_master_2_addr = salt_mm_failover_master_2.config["interface"]
    config_overrides = {
        "master": [
            "{}:{}".format(mm_master_1_addr, mm_master_1_port),
            "{}:{}".format(mm_master_2_addr, mm_master_2_port),
        ],
        "publish_port": salt_mm_failover_master_1.config["publish_port"],
        "master_type": "failover",
        "master_alive_interval": 5,
        # Retry forever rather than giving up on a dead master.
        "master_tries": -1,
        "verify_master_pubkey_sign": True,
    }
    factory = salt_mm_failover_master_1.salt_minion_daemon(
        "mm-failover-pki-minion-1",
        defaults=config_defaults,
        overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=info"],
    )
    # Need to grab the public signing key from the master, either will do
    shutil.copyfile(
        os.path.join(salt_mm_failover_master_1.config["pki_dir"], "master_sign.pub"),
        os.path.join(factory.config["pki_dir"], "master_sign.pub"),
    )
    # Start (and stop) the minion just to generate the log output.
    with caplog.at_level(logging.DEBUG):
        with factory.started(start_timeout=120):
            pass
    assert (
        "Successfully verified signature of master public key with verification public key master_sign.pub"
        in caplog.text
    )
def test_return_to_assigned_master(
    mm_failover_master_1_salt_cli,
    mm_failover_master_2_salt_cli,
    salt_mm_failover_minion_1,
    salt_mm_failover_minion_2,
    run_salt_cmds,
):
    """
    Test that values are being returned to only the master the minion is currently connected to.
    """
    # run_salt_cmds returns (cli, minion) pairs for each successful return;
    # with both masters up, each minion answers only its assigned master.
    returns = run_salt_cmds(
        [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],
        [salt_mm_failover_minion_1, salt_mm_failover_minion_2],
    )
    assert len(returns) == 2
    assert (mm_failover_master_1_salt_cli, salt_mm_failover_minion_1) in returns
    assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns
def METHOD_NAME(
    event_listener,
    salt_mm_failover_master_1,
    salt_mm_failover_master_2,
    salt_mm_failover_minion_1,
    salt_mm_failover_minion_2,
    mm_failover_master_1_salt_cli,
    mm_failover_master_2_salt_cli,
    run_salt_cmds,
):
    """
    Test then when the first master is stopped, connected minions failover to the second master.
    """
    # Expect minion 1 (originally on master 1) to announce itself to master 2.
    event_patterns = [
        (
            salt_mm_failover_master_2.id,
            "salt/minion/{}/start".format(salt_mm_failover_minion_1.id),
        )
    ]
    start_time = time.time()
    with salt_mm_failover_master_1.stopped():
        assert salt_mm_failover_master_2.is_running()
        # We need to wait for them to realize that the master is not alive
        # At this point, only the first minion will need to change masters
        events = event_listener.wait_for_events(
            event_patterns,
            timeout=salt_mm_failover_minion_1.config["master_alive_interval"] * 4,
            after_time=start_time,
        )
        assert salt_mm_failover_minion_1.is_running()
        assert not events.missed
        # Both minions should now return exclusively through master 2.
        returns = run_salt_cmds(
            [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],
            [salt_mm_failover_minion_1, salt_mm_failover_minion_2],
        )
        assert len(returns) == 2
        assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_1) in returns
        assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns
def test_minion_reconnection(
    salt_mm_failover_minion_1,
    salt_mm_failover_minion_2,
    mm_failover_master_1_salt_cli,
    mm_failover_master_2_salt_cli,
    run_salt_cmds,
):
    """
    Test that minions reconnect to a live master.

    To work well with salt factories, the minions will reconnect to the master they were connected to in conftest.py.
    """
    # Stop both minions; leaving the context restarts them.
    with salt_mm_failover_minion_1.stopped(), salt_mm_failover_minion_2.stopped():
        log.debug("Minions have stopped. They will restart next.")
    # After restart each minion should answer its originally-assigned master.
    returns = run_salt_cmds(
        [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],
        [salt_mm_failover_minion_1, salt_mm_failover_minion_2],
    )
    assert len(returns) == 2
    assert (mm_failover_master_1_salt_cli, salt_mm_failover_minion_1) in returns
    assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns
@pytest.mark.skip_on_windows
def test_minions_alive_with_no_master(
    grains,
    event_listener,
    salt_mm_failover_master_1,
    salt_mm_failover_master_2,
    salt_mm_failover_minion_1,
    salt_mm_failover_minion_2,
):
    """
    Make sure the minions stay alive after all masters have stopped.
    """
    if grains["os_family"] == "Debian" and grains["osmajorrelease"] == 9:
        pytest.skip(
            "Skipping on Debian 9 until flaky issues resolved. See issue #61749"
        )
    start_time = time.time()
    with salt_mm_failover_master_1.stopped():
        with salt_mm_failover_master_2.stopped():
            # Make sure they had at least one chance to re-auth
            events = event_listener.wait_for_events(
                [
                    (salt_mm_failover_minion_1.id, "__master_disconnected"),
                    (salt_mm_failover_minion_2.id, "__master_disconnected"),
                ],
                timeout=salt_mm_failover_minion_1.config["master_alive_interval"] * 4,
                after_time=start_time,
            )
            assert not events.missed
            # Both masters down: minion processes must still be running.
            assert salt_mm_failover_minion_1.is_running()
            assert salt_mm_failover_minion_2.is_running()
            # Reset the clock so the reconnection wait below only counts
            # events emitted after the masters come back up.
            start_time = time.time()
    # Masters restarted on context exit; each minion should reconnect to one
    # of them (either is acceptable), producing a start event.
    event_patterns = [
        (
            salt_mm_failover_master_1.id,
            "salt/minion/{}/start".format(salt_mm_failover_minion_1.id),
        ),
        (
            salt_mm_failover_master_1.id,
            "salt/minion/{}/start".format(salt_mm_failover_minion_2.id),
        ),
        (
            salt_mm_failover_master_2.id,
            "salt/minion/{}/start".format(salt_mm_failover_minion_1.id),
        ),
        (
            salt_mm_failover_master_2.id,
            "salt/minion/{}/start".format(salt_mm_failover_minion_2.id),
        ),
    ]
    events = event_listener.wait_for_events(
        event_patterns,
        timeout=salt_mm_failover_minion_1.config["master_alive_interval"] * 8,
        after_time=start_time,
    )
    # One start event per minion, on whichever master it picked.
    assert len(events.matches) >= 2
    expected_tags = {
        "salt/minion/{}/start".format(salt_mm_failover_minion_1.id),
        "salt/minion/{}/start".format(salt_mm_failover_minion_2.id),
    }
    assert {event.tag for event in events} == expected_tags
6,341 | onlyaml script | #!/usr/bin/python2
############################################################
#
# Extended YAML Support
#
# Supports include files and variable interpolations.
#
############################################################
import yaml
import os
import pprint
import tempfile
from string import Template
class OnlYamlError(Exception):
"""General Error Exception"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def dflatten(rv, in_):
    """Recursively flatten ``in_`` into the accumulator dict ``rv``.

    ``in_`` may be a dict (merged into ``rv``, later keys win) or an
    arbitrarily nested list whose leaf elements are dicts.

    :param rv: destination dictionary; updated in place.
    :param in_: dict, or (nested) list of dicts.
    :return: ``rv``, for chaining.
    :raises OnlYamlError: if an element is neither a dict nor a list.
    """
    # isinstance (rather than `type(...) is`) also accepts dict/list
    # subclasses such as OrderedDict, which YAML loaders commonly produce.
    if isinstance(in_, dict):
        rv.update(in_)
        return rv
    if isinstance(in_, list):
        for e in in_:
            dflatten(rv, e)
        return rv
    raise OnlYamlError("Element type '%s' cannot be added to the given dictionary." % (type(in_)))
def loadf(fname, vard={}):
    """Load a YAML file with variable interpolation, !include and !script.

    NOTE: this module is Python 2 (see the except-comma syntax and
    iteritems below). The mutable default ``vard={}`` is safe here only
    because it is read, never mutated.

    :param fname: path to the YAML file.
    :param vard: extra variables for $-interpolation (also inherited by
        recursive !include/!script loads).
    :return: the parsed (and interpolated) YAML data.
    :raises OnlYamlError: on unresolved variables, bad includes, failed
        scripts, or YAML parse errors.
    """
    # Apply variable interpolation:
    def interpolate(s, d):
        # string.Template substitution; unresolved names become OnlYamlError.
        error_string = "Yaml variable substitution error: '%s' could not be resolved."
        try:
            template = Template(s)
            s = template.substitute(d)
        except KeyError as e:
            raise OnlYamlError(error_string % (e.args[0]))
        except:
            raise
        return s

    variables = {}
    # Files can reference environment variables
    variables.update(os.environ)
    # Files can reference their own directory.
    variables['__DIR__'] = os.path.dirname(os.path.abspath(fname))
    # Files can reference invokation parameters.
    variables.update(vard)

    # Yaml Include constructor. Allows variables.
    def onlyaml_include(loader, node):
        # Get the path out of the yaml file
        directive = node.value
        fields = directive.split()
        fname = fields[0]
        options = fields[1:]
        # Options are k=v pairs that become interpolation variables.
        for opt in options:
            try:
                (k,v) = opt.split('=')
            except ValueError:
                raise OnlYamlError("Bad include directive: %s" % opt)
            variables[k] = v;
        fname = interpolate(fname, variables)
        # Relative includes resolve against the including file's directory.
        if not os.path.isabs(fname):
            fname = os.path.join(os.path.dirname(loader.name), fname)
        if not os.path.exists(fname):
            raise OnlYamlError("Include file '%s' (from %s) does not exist." % (fname, loader.name))
        return loadf(fname, variables)

    # Yaml dynamic constructor. Allow dynamically generated yaml.
    # SECURITY NOTE: the !script directive shells out via os.system on
    # interpolated file content — only load trusted YAML with this module.
    def METHOD_NAME(loader, node):
        directive = interpolate(node.value, variables)
        tf = tempfile.NamedTemporaryFile()
        tf.close()
        if os.system("%s > %s" % (directive, tf.name)) != 0:
            raise OnlYamlError("Script execution '%s' failed." % directive)
        return loadf(tf.name, variables)

    # NOTE: constructors are registered globally on the yaml module, and
    # close over *this* call's ``variables`` dict.
    yaml.add_constructor("!include", onlyaml_include)
    yaml.add_constructor("!script", METHOD_NAME)

    # First load: grab the variables dict
    string = open(fname).read()
    try:
        data = yaml.load(string)
    except Exception, e:
        raise OnlYamlError("%s\n(filename: %s)" % (e, fname))
    if type(data) is dict:
        # The file's own 'variables' section is flattened, then each key and
        # value is itself interpolated before joining the variable pool.
        _v = dflatten({}, data.get('variables', {}))
        variables.update(_v)
        for (k,v) in _v.iteritems():
            k = interpolate(k, variables)
            v = interpolate(v, variables)
            variables[k] = v

    ############################################################
    #
    # Interpolate the entire package contents using the
    # generated variables dict and reload it.
    #
    ############################################################
    string = interpolate(string, variables)
    try:
        data = yaml.load(string)
    except OnlYamlError, e:
        raise e
    except Exception, e:
        raise OnlYamlError("Interpolation produced invalid results:\n%s\n" % string)
    return data
# CLI entry point (Python 2): dump the loaded/interpolated YAML to stdout,
# or print a usage/error message to stderr.
if __name__ == '__main__':
    import sys
    try:
        if len(sys.argv) == 2:
            print yaml.dump(loadf(sys.argv[1]))
        else:
            sys.stderr.write("usage: %s <yamlfile>\n" % sys.argv[0])
    except OnlYamlError, e:
        sys.stderr.write("error: %s\n" % e.value)
6,342 | test fan modes | from homeassistant.components.climate.const import ClimateEntityFeature, HVACMode
from homeassistant.const import UnitOfTemperature
from ..const import BECA_BHP6000_PAYLOAD
from ..helpers import assert_device_properties_set
from ..mixins.climate import TargetTemperatureTests
from ..mixins.light import BasicLightTests
from ..mixins.lock import BasicLockTests
from .base_device_tests import TuyaDeviceTestCase
# Tuya datapoint (DPS) ids for this thermostat, as used by the tests below.
LIGHT_DPS = "1"
TEMPERATURE_DPS = "2"
CURRENTTEMP_DPS = "3"
PRESET_DPS = "4"
HVACMODE_DPS = "5"
FAN_DPS = "6"
LOCK_DPS = "7"
class TestBecaBHP6000Thermostat(
    BasicLightTests,
    BasicLockTests,
    TargetTemperatureTests,
    TuyaDeviceTestCase,
):
    """Tests for the Beca BHP6000 thermostat (Fahrenheit config)."""

    __test__ = True

    def setUp(self):
        self.setUpForConfig(
            "beca_bhp6000_thermostat_f.yaml",
            BECA_BHP6000_PAYLOAD,
        )
        self.subject = self.entities.get("climate")
        # Fahrenheit config: 40–95 °F target range.
        self.setUpTargetTemperature(
            TEMPERATURE_DPS,
            self.subject,
            min=40,
            max=95,
        )
        self.setUpBasicLight(LIGHT_DPS, self.entities.get("light_display"))
        self.setUpBasicLock(LOCK_DPS, self.entities.get("lock_child_lock"))
        self.mark_secondary(["light_display", "lock_child_lock"])

    def test_supported_features(self):
        self.assertEqual(
            self.subject.supported_features,
            (
                ClimateEntityFeature.FAN_MODE
                | ClimateEntityFeature.PRESET_MODE
                | ClimateEntityFeature.TARGET_TEMPERATURE
            ),
        )

    def test_temperature_unit_returns_configured_temperature_unit(self):
        self.assertEqual(
            self.subject.temperature_unit,
            UnitOfTemperature.FAHRENHEIT,
        )

    async def test_legacy_set_temperature_with_preset_mode(self):
        # Legacy HA API allowed setting preset via set_temperature.
        async with assert_device_properties_set(
            self.subject._device,
            {PRESET_DPS: 1},
        ):
            await self.subject.async_set_temperature(preset_mode="program")

    async def test_legacy_set_temperature_with_both_properties(self):
        async with assert_device_properties_set(
            self.subject._device,
            {
                TEMPERATURE_DPS: 78,
                PRESET_DPS: 4,
            },
        ):
            await self.subject.async_set_temperature(
                temperature=78,
                preset_mode="away",
            )

    def test_current_temperature(self):
        self.dps[CURRENTTEMP_DPS] = 70
        self.assertEqual(self.subject.current_temperature, 70)

    def test_hvac_mode(self):
        # DPS "5" values 1..5 map to cool/heat/off/heat_cool/auto.
        self.dps[HVACMODE_DPS] = "1"
        self.assertEqual(self.subject.hvac_mode, HVACMode.COOL)
        self.dps[HVACMODE_DPS] = "2"
        self.assertEqual(self.subject.hvac_mode, HVACMode.HEAT)
        self.dps[HVACMODE_DPS] = "3"
        self.assertEqual(self.subject.hvac_mode, HVACMode.OFF)
        self.dps[HVACMODE_DPS] = "4"
        self.assertEqual(self.subject.hvac_mode, HVACMode.HEAT_COOL)
        self.dps[HVACMODE_DPS] = "5"
        self.assertEqual(self.subject.hvac_mode, HVACMode.AUTO)

    def test_hvac_modes(self):
        self.assertCountEqual(
            self.subject.hvac_modes,
            [
                HVACMode.OFF,
                HVACMode.HEAT,
                HVACMode.HEAT_COOL,
                HVACMode.COOL,
                HVACMode.AUTO,
            ],
        )

    def test_fan_mode(self):
        # Fan DPS is a simple boolean: False=auto, True=on.
        self.dps[FAN_DPS] = False
        self.assertEqual(self.subject.fan_mode, "auto")
        self.dps[FAN_DPS] = True
        self.assertEqual(self.subject.fan_mode, "on")

    def METHOD_NAME(self):
        self.assertCountEqual(
            self.subject.fan_modes,
            [
                "auto",
                "on",
            ],
        )

    async def test_set_fan_mode_to_auto(self):
        async with assert_device_properties_set(
            self.subject._device,
            {FAN_DPS: False},
        ):
            await self.subject.async_set_fan_mode("auto")

    async def test_set_fan_mode_to_on(self):
        async with assert_device_properties_set(
            self.subject._device,
            {FAN_DPS: True},
        ):
            await self.subject.async_set_fan_mode("on")

    def test_extra_state_attributes(self):
        self.assertEqual(self.subject.extra_state_attributes, {})

    def test_icons(self):
        # Climate icon follows HVAC mode; light icon follows display state.
        self.dps[HVACMODE_DPS] = 1
        self.assertEqual(self.subject.icon, "mdi:snowflake")
        self.dps[HVACMODE_DPS] = 2
        self.assertEqual(self.subject.icon, "mdi:fire")
        self.dps[HVACMODE_DPS] = 3
        self.assertEqual(self.subject.icon, "mdi:hvac-off")
        self.dps[HVACMODE_DPS] = 4
        self.assertEqual(self.subject.icon, "mdi:fire-alert")
        self.dps[HVACMODE_DPS] = 5
        self.assertEqual(self.subject.icon, "mdi:hvac")
        self.dps[LIGHT_DPS] = True
        self.assertEqual(self.basicLight.icon, "mdi:led-on")
        self.dps[LIGHT_DPS] = False
        self.assertEqual(self.basicLight.icon, "mdi:led-off")
class TestBecaBHP6000ThermostatC(TuyaDeviceTestCase):
    """Tests for the Beca BHP6000 thermostat (Celsius config variant)."""

    __test__ = True

    def setUp(self):
        self.setUpForConfig(
            "beca_bhp6000_thermostat_c.yaml",
            BECA_BHP6000_PAYLOAD,
        )
        self.subject = self.entities.get("climate")
        self.mark_secondary(["light_display", "lock_child_lock"])

    def test_temperature_unit_returns_configured_temperature_unit(self):
        self.assertEqual(
            self.subject.temperature_unit,
            UnitOfTemperature.CELSIUS,
        )

    # Celsius config: 5–35 °C target range.
    def test_minimum_target_temperature(self):
        self.assertEqual(self.subject.min_temp, 5)

    def test_maximum_target_temperature(self):
        self.assertEqual(self.subject.max_temp, 35)
6,343 | delete synapse scope | # -*- coding: utf-8 -*-
#
# symbol_table.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from typing import Mapping
from pynestml.symbol_table.scope import Scope, ScopeType
class SymbolTable:
    """
    This class is used to store a single symbol table, consisting of scope and symbols.

    All state is class-level; this acts as a process-wide registry rather
    than an instantiable object.

    Attributes:
        name2neuron_scope A dict from the name of a neuron to the corresponding scope. Type str->Scope
        name2synapse_scope A dict from the name of a synapse to the corresponding scope. Type str->Scope
        source_location The source position of the overall compilation unit. Type ASTSourceLocation
    """
    name2neuron_scope = {}  # type: Mapping[str, Scope]
    name2synapse_scope = {}
    source_location = None

    @classmethod
    def initialize_symbol_table(cls, source_position):
        """
        Standard initializer. Resets both registries and records the source position.
        """
        cls.source_location = source_position
        cls.name2neuron_scope = {}
        cls.name2synapse_scope = {}

    @classmethod
    def add_neuron_scope(cls, name, scope):
        """
        Adds a single neuron scope to the set of stored scopes.

        :param name: the name of the neuron.
        :type name: str
        :param scope: the (global) scope to register; existing entries are not overwritten.
        :type scope: Scope
        """
        assert isinstance(scope, Scope), \
            '(PyNestML.SymbolTable.SymbolTable) No or wrong type of scope provided (%s)!' % type(scope)
        assert (scope.get_scope_type() == ScopeType.GLOBAL), \
            '(PyNestML.SymbolTable.SymbolTable) Only global scopes can be added!'
        assert isinstance(name, str), \
            '(PyNestML.SymbolTable.SymbolTable) No or wrong type of name provided (%s)!' % type(name)
        if name not in cls.name2neuron_scope.keys():
            cls.name2neuron_scope[name] = scope
        return

    @classmethod
    def delete_neuron_scope(cls, name):
        """
        Deletes a single neuron scope from the set of stored scopes.

        :param name: the name of the scope to delete; silently ignored if absent.
        :type name: str
        """
        if name in cls.name2neuron_scope.keys():
            del cls.name2neuron_scope[name]
        return

    @classmethod
    def add_synapse_scope(cls, name, scope):
        """
        Adds a single synapse scope to the set of stored scopes.

        :param name: the name of the synapse.
        :type name: str
        :param scope: the (global) scope to register; existing entries are not overwritten.
        :type scope: Scope
        """
        assert isinstance(scope, Scope), \
            '(PyNestML.SymbolTable.SymbolTable) No or wrong type of scope provided (%s)!' % type(scope)
        assert (scope.get_scope_type() == ScopeType.GLOBAL), \
            '(PyNestML.SymbolTable.SymbolTable) Only global scopes can be added!'
        assert isinstance(name, str), \
            '(PyNestML.SymbolTable.SymbolTable) No or wrong type of name provided (%s)!' % type(name)
        if name not in cls.name2synapse_scope.keys():
            cls.name2synapse_scope[name] = scope
        return

    @classmethod
    def METHOD_NAME(cls, name):
        """
        Deletes a single synapse scope from the set of stored scopes.

        :param name: the name of the scope to delete; silently ignored if absent.
        :type name: str
        """
        if name in cls.name2synapse_scope.keys():
            del cls.name2synapse_scope[name]
        return

    @classmethod
    def clean_up_table(cls):
        """
        Deletes all entries as stored in the symbol table.
        """
        del cls.name2neuron_scope
        cls.name2neuron_scope = {}
        del cls.name2synapse_scope
        cls.name2synapse_scope = {}

    @classmethod
    def print_symbol_table(cls) -> str:
        """
        Prints the content of this symbol table.

        :return: a human-readable dump of all neuron and synapse scopes.
        """
        ret = ''
        for _name in cls.name2neuron_scope.keys():
            ret += '--------------------------------------------------\n'
            ret += _name + ':\n'
            ret += cls.name2neuron_scope[_name].print_scope()
        for _name in cls.name2synapse_scope.keys():
            ret += '--------------------------------------------------\n'
            ret += _name + ':\n'
            ret += cls.name2synapse_scope[_name].print_scope()
        return ret
6,344 | test error raised when private key file | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import tempfile
import paramiko
import io
import os
import shutil
from knack.util import CLIError
from unittest import mock
from azure.cli.core.keys import generate_ssh_keys
class TestGenerateSSHKeys(unittest.TestCase):
    def setUp(self):
        # set up temporary directory to be used for temp files.
        self._tempdirName = tempfile.mkdtemp(prefix="key_tmp_")
        # Generate a fresh RSA keypair and keep both PEM private key text and
        # the OpenSSH-format public key string for the tests to compare against.
        self.key = paramiko.RSAKey.generate(2048)
        keyOutput = io.StringIO()
        self.key.write_private_key(keyOutput)
        self.private_key = keyOutput.getvalue()
        self.public_key = '{} {}'.format(self.key.get_name(), self.key.get_base64())
    def tearDown(self):
        # delete temporary directory to be used for temp files.
        shutil.rmtree(self._tempdirName)
    def test_when_public_key_file_exists(self):
        """Existing key files are reused untouched and the public key returned."""
        # Create public key file
        public_key_path = self._create_new_temp_key_file(self.public_key, suffix=".pub")
        # Create private key file
        private_key_path = self._create_new_temp_key_file(self.private_key)
        # Call generate_ssh_keys and assert that returned public key same as original
        new_public_key = generate_ssh_keys("", public_key_path)
        self.assertEqual(self.public_key, new_public_key)
        # Check that private and public key file contents unchanged.
        with open(public_key_path, 'r') as f:
            new_public_key = f.read()
        self.assertEqual(self.public_key, new_public_key)
        with open(private_key_path, 'r') as f:
            new_private_key = f.read()
        self.assertEqual(self.private_key, new_private_key)
    def test_error_raised_when_public_key_file_exists_IOError(self):
        """An IOError while reading an existing public key becomes a CLIError."""
        # Create public key file
        public_key_path = self._create_new_temp_key_file(self.public_key)
        with mock.patch('azure.cli.core.keys.open') as mocked_open:
            # mock failed call to read
            mocked_f = mocked_open.return_value.__enter__.return_value
            mocked_f.read = mock.MagicMock(side_effect=IOError("Mocked IOError"))
            # assert that CLIError raised when generate_ssh_keys is called
            with self.assertRaises(CLIError):
                generate_ssh_keys("", public_key_path)
            # assert that CLIError raised because of attempt to read public key file.
            mocked_open.assert_called_once_with(public_key_path, 'r')
            mocked_f.read.assert_called_once()
def test_error_raised_when_private_key_file_exists_IOError(self):
# Create private key file
private_key_path = self._create_new_temp_key_file(self.private_key)
with mock.patch('paramiko.RSAKey') as mocked_RSAKey:
# mock failed RSAKey generation
mocked_RSAKey.side_effect = IOError("Mocked IOError")
# assert that CLIError raised when generate_ssh_keys is called
with self.assertRaises(CLIError):
public_key_path = private_key_path + ".pub"
generate_ssh_keys(private_key_path, public_key_path)
# assert that CLIError raised because of attempt to generate key from private key file.
mocked_RSAKey.assert_called_once_with(filename=private_key_path)
def METHOD_NAME(self):
# Create empty private key file
private_key_path = self._create_new_temp_key_file("")
# Write encrypted / passworded key into file
self.key.write_private_key_file(private_key_path, password="test")
# Check that CLIError exception is raised when generate_ssh_keys is called.
with self.assertRaises(CLIError):
public_key_path = private_key_path + ".pub"
generate_ssh_keys(private_key_path, public_key_path)
def test_generate_public_key_file_from_existing_private_key_files(self):
# Create private key file
private_key_path = self._create_new_temp_key_file(self.private_key)
# Call generate_ssh_keys and assert that returned public key same as original
public_key_path = private_key_path + ".pub"
new_public_key = generate_ssh_keys(private_key_path, public_key_path)
self.assertEqual(self.public_key, new_public_key)
# Check that correct public key file has been created
with open(public_key_path, 'r') as f:
public_key = f.read()
self.assertEqual(self.public_key, public_key)
# Check that private key file contents unchanged
with open(private_key_path, 'r') as f:
private_key = f.read()
self.assertEqual(self.private_key, private_key)
def test_generate_new_private_public_key_files(self):
# create random temp file name
f = tempfile.NamedTemporaryFile(mode='w', dir=self._tempdirName)
f.close()
private_key_path = f.name
# Call generate_ssh_keys and assert that returned public key same as original
public_key_path = private_key_path + ".pub"
new_public_key = generate_ssh_keys(private_key_path, public_key_path)
# Check that public key returned is same as public key in public key path
with open(public_key_path, 'r') as f:
public_key = f.read()
self.assertEqual(public_key, new_public_key)
# Check that public key corresponds to private key
with open(private_key_path, 'r') as f:
key = paramiko.RSAKey(filename=private_key_path)
public_key = '{} {}'.format(key.get_name(), key.get_base64())
self.assertEqual(public_key, new_public_key)
def _create_new_temp_key_file(self, key_data, suffix=""):
with tempfile.NamedTemporaryFile(mode='w', dir=self._tempdirName, delete=False, suffix=suffix) as f:
f.write(key_data)
return f.name
# Run the test suite when this module is executed as a script.
if __name__ == "__main__":
    unittest.main()
import os
from itertools import cycle, islice, chain
from align.cell_fabric import transformation
from .canvas import CanvasPDK
from .gen_transistor import mos
from align.schema.transistor import Transistor, TransistorArray
class MOSGenerator(CanvasPDK):
    """PDK-specific generator that stamps and routes interleaved arrays of unit transistors."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Cells that must be instantiated during/after importing the layout into Virtuoso.
        self.instantiated_cells = []

    # TODO: Eliminate this method, mos_array instead
    def addNMOSArray(self, x_cells, y_cells, pattern, vt_type, ports, **parameters):
        self.mos_array_temporary_wrapper(x_cells, y_cells, pattern, vt_type, ports, **parameters)

    # TODO: Eliminate this method, mos_array instead
    def addPMOSArray(self, x_cells, y_cells, pattern, vt_type, ports, **parameters):
        self.mos_array_temporary_wrapper(x_cells, y_cells, pattern, vt_type, ports, **parameters)

    # TODO: Eliminate this method. Pass align/schema/transistor.py/TransistorArray object to mos_array directly
    def mos_array_temporary_wrapper(self, x_cells, y_cells, pattern, vt_type, ports, **parameters):
        """Build a TransistorArray from legacy-style arguments and forward it to mos_array."""
        #################################################################################################
        # TODO: All of below goes away when TransistorArray is passed to mos_array as shown below
        for key in ['m', 'real_inst_type']:
            assert key in parameters, f'Missing transistor parameter {key}'
        # BUG FIX: the original `assert 'nf' or 'stack' in parameters` was always true
        # because the non-empty literal 'nf' is truthy; test membership of both keys.
        assert 'nf' in parameters or 'stack' in parameters, 'Missing transistor parameter nf or stack'

        if 'nf' in parameters:
            nf = 'nf'
            device_type = 'parallel'
        elif 'stack' in parameters:
            nf = 'stack'
            device_type = 'stack'
        else:
            # Unreachable after the assert above; kept as a defensive default.
            nf = device_type = None

        unit_transistor = Transistor(device_type=device_type,
                                     nf=parameters[nf],
                                     nfin=4,
                                     model_name=parameters['real_inst_type'])

        def find_ports(p, i):
            # Map pin name -> net name for instance `i`, from {net: [(inst, pin), ...]}.
            d = {}
            for (k, v) in p.items():
                for t in v:
                    if t[0] == i:
                        d[t[1]] = k
            return d

        p1 = find_ports(ports, 'M1')
        p = {1: p1}
        m = {1: parameters['m']}
        p2 = find_ports(ports, 'M2')
        # NOTE(review): a single-pin 'M2' is ignored by this `> 1` test — confirm intended.
        if len(p2) > 1:
            m[2] = parameters['m']
            p[2] = p2

        transistor_array = TransistorArray(
            unit_transistor=unit_transistor,
            m=m,
            ports=p,
            n_rows=x_cells
        )
        # TODO: All of above goes away when TransistorArray is passed to mos_array as shown below
        #################################################################################################
        self.mos_array(transistor_array, **parameters)

    def mos_array(self, transistor_array: TransistorArray, **parameters):
        """Place and route an interleaved array built from the unit transistor."""
        assert len(transistor_array.m) <= 2, 'Arrays of more than 2 devices not supported yet'

        # Generate leaf cells
        tx = mos(transistor_array.unit_transistor)

        # Define the interleaving array (aka array logic)
        n_row, n_col = self._calculate_row_col(transistor_array)
        interleave = self.METHOD_NAME(transistor_array, n_row, n_col)
        print(interleave)  # debug output

        cnt = 0
        rows = []
        for y in range(n_row):
            row = []
            for x in range(n_col):
                # Fall back to device 1's pin map when the pattern id has no entry
                # (e.g. the single-device pattern emits id 0).
                pin_map = transistor_array.ports.get(interleave[cnt], transistor_array.ports[1])
                flip_x = 1
                row.append([tx, f'i{cnt}', pin_map, flip_x])
                cnt += 1
            rows.append(row)

        # Stamp the instances
        self.place(rows)

        # Route
        self.route()

        self.computeBbox()

    def stamp_cell(self, template, instance_name, pin_map, x_offset, y_offset, flip_x):
        """Copy one leaf-cell template into self.terminals at (x_offset, y_offset).

        flip_x < 0 mirrors the cell about its vertical axis; pin_map renames the
        template's nets to the array's nets.
        """
        bbox = template['bbox']

        # bounding box as visual aid
        t = {'layer': 'Boundary', 'netName': None,
             'rect': [bbox[0] + x_offset, bbox[1] + y_offset, bbox[2] + x_offset, bbox[3] + y_offset]}
        self.terminals.append(t)

        if flip_x < 0:
            # BUG FIX: mirroring must shift by the cell *width* (x2 - x0); the original
            # used bbox[2] - bbox[1], mixing an x and a y coordinate. (Currently
            # unreachable from mos_array, which always passes flip_x = 1.)
            x_offset += bbox[2] - bbox[0]

        # append terminals
        for term in template['terminals']:
            t = {}
            r = term['rect'].copy()
            if flip_x < 0:
                t['rect'] = [x_offset - r[2], r[1] + y_offset, x_offset - r[0], r[3] + y_offset]
            else:
                t['rect'] = [x_offset + r[0], r[1] + y_offset, x_offset + r[2], r[3] + y_offset]
            t['layer'] = term['layer']
            t['netName'] = pin_map.get(term['netName'], None)
            self.terminals.append(t)

        # Cells listed below has to be instantiated during/after importing layout to Virtuoso
        self.instantiated_cells.append([instance_name, (x_offset, y_offset, flip_x, 1), template['instance']])

    def place(self, rows):
        """Stamp every cell row-major and record the overall bounding box."""
        # keep record of what x, y, sx, sy the instance is stamped
        x_offset = y_offset = 0
        for row in rows:
            x_offset = 0
            for device in row:
                [cell, instance_name, pin_map, flip_x] = device
                self.stamp_cell(cell, instance_name, pin_map, x_offset, y_offset, flip_x)
                x_offset += cell['bbox'][2] - cell['bbox'][0]
            y_offset += cell['bbox'][3] - cell['bbox'][1]
        self.bbox = transformation.Rect(*[0, 0, x_offset, y_offset])
        print(self.bbox)  # debug output

    def route(self):
        # Routing is not implemented for this PDK yet.
        pass

    @staticmethod
    def _calculate_row_col(transistor_array: TransistorArray):
        """Return (n_rows, n_cols), validating that devices divide evenly over rows."""
        m = 0
        for _, v in transistor_array.m.items():
            m += v
        assert m % transistor_array.n_rows == 0, \
            f'Illegal number of rows {transistor_array.n_rows} for {m} devices in total'
        return transistor_array.n_rows, m // transistor_array.n_rows

    @staticmethod
    def METHOD_NAME(transistor_array, n_row, n_col):
        """Return the row-major device-id pattern used to interleave up to two devices."""
        lst = []
        if len(transistor_array.m) < 2:
            # Single device: id 0 everywhere; mos_array resolves it via ports.get(..., ports[1]).
            for y in range(n_row):
                lst.extend([0] * n_col)
        else:
            m = (n_col * n_row) // 2
            if m % 2 == 0:  # even: checkerboard by alternating row phase
                for y in range(n_row):
                    if y % 2 == 0:
                        lst.extend([k for k in islice(cycle([1, 2]), n_col)])
                    else:
                        lst.extend([k for k in islice(cycle([2, 1]), n_col)])
            else:  # odd: plain 1,2,1,2,... sequence
                lst = [1, 2] * m
        return lst
def test_one():
    """Smoke test: single-device NMOS array ('nf' / parallel) written to the ALIGN viewer dir."""
    mg = MOSGenerator()
    ports = {'SA': [('M1', 'S')], 'DA': [('M1', 'D')], 'GA': [('M1', 'G')]}
    parameters = {'m': 4, 'nf': 2, 'real_inst_type': 'n'}
    mg.addNMOSArray(2, 1, 1, None, ports, **parameters)
    # Requires ALIGN_HOME to point at a checkout with a Viewer/INPUT directory.
    fn = os.path.join(os.environ['ALIGN_HOME'], 'Viewer/INPUT/test_primitive_one.json')
    with open(fn, "wt") as fp:
        mg.writeJSON(fp, draw_grid=False, run_drc=False, run_pex=False, postprocess=True)
def test_two():
    """Smoke test: two-device stacked array sharing a source net, written to the viewer dir."""
    mg = MOSGenerator()
    ports = {'S': [('M1', 'S'), ('M2', 'S')],
             'DA': [('M1', 'D')], 'DB': [('M2', 'D')],
             'GA': [('M1', 'G')], 'GB': [('M2', 'G')]
             }
    parameters = {'m': 4, 'stack': 4, 'real_inst_type': 'n'}
    mg.addNMOSArray(2, 1, 1, None, ports, **parameters)
    # Requires ALIGN_HOME to point at a checkout with a Viewer/INPUT directory.
    fn = os.path.join(os.environ['ALIGN_HOME'], 'Viewer/INPUT/test_primitive_two.json')
    with open(fn, "wt") as fp:
        mg.writeJSON(fp, draw_grid=False, run_drc=False, run_pex=False, postprocess=True, )
# Executed at import time: emit both demo layouts.
test_one()
test_two()
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests dace.program as class methods """
import dace
import numpy as np
import sys
import time
class MyTestClass:
    """ Test class with various values, lifetimes, and call types. """

    # Shared class attribute; deliberately mutated by some tests below.
    classvalue = 2

    def __init__(self, n=5) -> None:
        self.n = n

    @dace.method
    def method_jit(self, A):
        # No annotations: argument types are inferred at first call (JIT).
        return A + self.n

    @dace.method
    def method(self, A: dace.float64[20]):
        return A + self.n

    @dace.method
    def __call__(self, A: dace.float64[20]):
        return A * self.n

    @dace.method
    def other_method_caller(self, A: dace.float64[20]):
        # Exercises calling both a sibling dace.method and this object's __call__.
        return self.method(A) + 2 + self(A)

    @staticmethod
    @dace.program
    def static(A: dace.float64[20]):
        return A + A

    @staticmethod
    @dace.program
    def static_withclass(A: dace.float64[20]):
        # Reads the class attribute through the global class name.
        return A + MyTestClass.classvalue

    @classmethod
    @dace.method
    def clsmethod(cls, A):
        return A + cls.classvalue
class MyTestCallAttributesClass:
    # Holds an attribute (`call_me`) whose type is SDFG-convertible via
    # __sdfg__/__sdfg_signature__, so dace can call it inside methods.
    class SDFGMethodTestClass:
        def __sdfg__(self, *args, **kwargs):
            # Builds a program that overwrites its argument with 7.0.
            @dace.program
            def call(A):
                A[:] = 7.0

            return call.__sdfg__(*args)

        def __sdfg_signature__(self):
            # (argument names, constant names)
            return ['A'], []

    def __init__(self, n=5) -> None:
        self.n = n
        self.call_me = MyTestCallAttributesClass.SDFGMethodTestClass()

    @dace.method
    def method_jit(self, A):
        self.call_me(A)
        return A + self.n

    @dace.method
    def __call__(self, A):
        self.call_me(A)
        return A * self.n

    @dace.method
    def method(self, A: dace.float64[20]):
        self.call_me(A)
        return A + self.n

    @dace.method
    def method_jit_with_scalar_arg(self, A, b):
        self.call_me(A)
        return A + b
def test_method_jit():
    """A @dace.method without annotations infers types at call time."""
    data = np.random.rand(20)
    instance = MyTestClass(10)
    assert np.allclose(instance.method_jit(data), data + 10)
def test_method():
    """An annotated @dace.method adds the instance field to its input."""
    data = np.random.rand(20)
    instance = MyTestClass(10)
    assert np.allclose(instance.method(data), data + 10)
def test_method_cache():
    """The compiled-method cache must key on the instance, not just the class."""
    data = np.random.rand(20)
    first = MyTestClass(10)
    second = MyTestClass(11)
    assert np.allclose(first.method(data), data + 10)
    # Repeat to hit the cached compilation for the same instance.
    assert np.allclose(first.method(data), data + 10)
    assert np.allclose(second.method(data), data + 11)
def test_callable():
    """__call__ defined as a @dace.method multiplies by the instance field."""
    data = np.random.rand(20)
    instance = MyTestClass(12)
    assert np.allclose(instance(data), data * 12)
def test_static():
    """A @staticmethod @dace.program is callable through the class."""
    data = np.random.rand(20)
    assert np.allclose(MyTestClass.static(data), data + data)
def test_static_withclass():
    """A static program sees the *current* value of a mutated class attribute."""
    data = np.random.rand(20)
    # TODO(later): Make cache strict w.r.t. globals and locals used in program
    # assert np.allclose(MyTestClass.static_withclass(data), data + 2)

    # Mutate the class attribute, then check the program picks up the new value.
    MyTestClass.classvalue = 3
    assert np.allclose(MyTestClass.static_withclass(data), data + 3)
def test_classmethod():
    """@classmethod + @dace.method resolves cls attributes (Python 3.9+ only)."""
    if sys.version_info < (3, 9):
        return
    data = np.random.rand(20)
    # Modify the class attribute first so the method must read the fresh value.
    MyTestClass.classvalue = 4
    assert np.allclose(MyTestClass.clsmethod(data), data + 4)
def test_nested_methods():
    """A dace.method may call other dace.methods (including __call__) on self."""
    data = np.random.rand(20)
    instance = MyTestClass()
    expected = (data * 5) + (data + 5) + 2
    assert np.allclose(instance.other_method_caller(data), expected)
def mydec(a):
    """Decorator factory: wrap a function in a dace.program that calls it with
    the captured constant ``a`` and return the compiled SDFG."""
    def mutator(func):
        # Turn the undecorated function into a dace program first.
        dp = dace.program(func)

        @dace.program
        def mmm(A: dace.float64[20]):
            # `a` is closed over and treated as a constant by dace.
            res = dp(A, a)
            return res

        # Compile to an SDFG immediately; callers invoke the SDFG directly.
        sdfg = mmm.to_sdfg()
        return sdfg

    return mutator
def METHOD_NAME(A: dace.float64[20], a: dace.float64):
    # Parsed later by dace.program (via mydec): a simple elementwise add.
    result = A + a
    return result
def someprog_indirection(a):
    """Apply the mydec decorator factory to METHOD_NAME with constant ``a``."""
    decorate = mydec(a)
    return decorate(METHOD_NAME)
def test_decorator():
    """A decorated-and-compiled SDFG can be called from inside another program."""
    # constant_functions=True lets the indirection call be resolved at parse time.
    @dace.program(constant_functions=True)
    def otherprog(A: dace.float64[20]):
        res = np.empty_like(A)
        # Call the pre-compiled SDFG, writing into `res` via its __return slot.
        someprog_indirection(3)(A=A, __return=res)
        return res

    sdfg = otherprog.to_sdfg()
    A = np.random.rand(20)
    assert np.allclose(sdfg(A), A + 3)
def test_sdfgattr_method_jit():
    """An __sdfg__-providing attribute is callable inside a JIT method."""
    data = np.random.rand(20)
    instance = MyTestCallAttributesClass(10)
    # call_me sets A[:] = 7.0, then the method adds n=10.
    assert np.allclose(instance.method_jit(data), 17)
def test_sdfgattr_callable_jit():
    """__call__ also works with an __sdfg__-providing attribute."""
    data = np.random.rand(20)
    instance = MyTestCallAttributesClass(12)
    # call_me sets A[:] = 7.0, then __call__ multiplies by n=12.
    assert np.allclose(instance(data), 84)
def test_sdfgattr_method_annotated_jit():
    """Annotated methods support __sdfg__-providing attributes too."""
    data = np.random.rand(20)
    instance = MyTestCallAttributesClass(14)
    # call_me sets A[:] = 7.0, then the method adds n=14.
    assert np.allclose(instance.method(data), 21)
def test_sdfgattr_method_jit_with_scalar():
    """Scalar extra arguments pass through a JIT method alongside the array."""
    data = np.random.rand(20)
    instance = MyTestCallAttributesClass(10)
    # call_me sets A[:] = 7.0, then the method adds the scalar b=2.0.
    assert np.allclose(instance.method_jit_with_scalar_arg(data, 2.0), 9.0)
def test_nested_field_in_map():
    """A field of a nested object is readable from inside a dace.map scope."""
    class B:
        def __init__(self) -> None:
            self.field = np.random.rand(10, 10)

        @dace.method
        def callee(self):
            # Reads a fixed element of the nested instance's array field.
            return self.field[1, 1]

    class A:
        def __init__(self, nested: B):
            self.nested = nested

        @dace.method
        def tester(self):
            val = np.ndarray([2], np.float64)
            # Parallel map: each iteration calls into the nested object's method.
            for i in dace.map[0:2]:
                val[i] = self.nested.callee()
            return val

    obj = A(B())
    result = obj.tester()
    assert np.allclose(result, np.array([obj.nested.field[1, 1], obj.nested.field[1, 1]]))
def test_nested_callback_in_map():
    """A Python callback (time.time) on a nested object works inside a dace.map."""
    class B:
        def __init__(self) -> None:
            self.field = np.random.rand(10, 10)

        @dace.method
        def callee(self, val, i):
            # time.time() cannot be compiled, so this runs as a host callback.
            val[i] = time.time()

    class A:
        def __init__(self, nested: B):
            self.nested = nested

        @dace.method
        def tester(self):
            val = np.ndarray([2], np.float64)
            for i in dace.map[0:2]:
                self.nested.callee(val, i)
            return val

    obj = A(B())
    # Bracket the call so the timestamps written by the callback can be bounded.
    old_time = time.time()
    result = obj.tester()
    new_time = time.time()
    assert result[0] >= old_time and result[0] <= new_time
def test_unbounded_method():
    """A @dace.method defined outside any class behaves like a plain program."""
    @dace.method
    def tester(a):
        return a + 1

    values = np.random.rand(20)
    assert np.allclose(tester(values), values + 1)
# Allow running the whole suite directly without pytest.
if __name__ == "__main__":
    test_method_jit()
    test_method()
    test_method_cache()
    test_callable()
    test_static()
    test_static_withclass()
    test_classmethod()
    test_nested_methods()
    test_decorator()
    test_sdfgattr_method_jit()
    test_sdfgattr_callable_jit()
    test_sdfgattr_method_annotated_jit()
    test_sdfgattr_method_jit_with_scalar()
    test_nested_field_in_map()
    test_nested_callback_in_map()
    test_unbounded_method()
import io
import logging
from typing import Any, Callable, Generic, Literal, TypeAlias, TypeVar
from aiohttp import web
from aiohttp.web_exceptions import HTTPError, HTTPException
from aiohttp.web_routedef import RouteDef, RouteTableDef
from models_library.generics import Envelope
from pydantic import BaseModel, Field
from pydantic.generics import GenericModel
from servicelib.common_headers import X_FORWARDED_PROTO
from servicelib.json_serialization import json_dumps
from servicelib.mimetype_constants import MIMETYPE_APPLICATION_JSON
from servicelib.rest_constants import RESPONSE_MODEL_POLICY
from yarl import URL
from ._constants import INDEX_RESOURCE_NAME
log = logging.getLogger(__name__)
def rename_routes_as_handler_function(routes: RouteTableDef, *, prefix: str):
    """Name every route '<prefix>.<handler function name>' in place."""
    for route_def in routes:  # each entry is a RouteDef
        handler_name = route_def.handler.__name__
        route_def.kwargs["name"] = f"{prefix}.{handler_name}"
def METHOD_NAME(routes: RouteTableDef) -> str:
    """Render the route table as text: its repr, then one indented line per route."""
    out = io.StringIO()
    print(routes, file=out)
    for route in routes:
        print(" ", route, file=out)
    return out.getvalue()
def create_url_for_function(request: web.Request) -> Callable:
    """Return a URL builder bound to *request* that resolves named router resources
    into absolute URLs (honoring the traefik X-Forwarded-Proto scheme override).

    Raises RuntimeError (from the builder) when the route name is unknown.
    """
    app = request.app

    def _url_for(route_name: str, **params: dict[str, Any]) -> str:
        """Reverse URL constructing using named resources"""
        try:
            rel_url: URL = app.router[route_name].url_for(
                **{k: f"{v}" for k, v in params.items()}
            )
            url: URL = (
                request.url.origin()
                .with_scheme(
                    # Custom header by traefik. See labels in docker-compose as:
                    # - traefik.http.middlewares.${SWARM_STACK_NAME_NO_HYPHEN}_sslheader.headers.customrequestheaders.X-Forwarded-Proto=http
                    request.headers.get(X_FORWARDED_PROTO, request.url.scheme)
                )
                .with_path(str(rel_url))
            )
            return f"{url}"
        except KeyError as err:
            # BUG FIX: the two adjacent string literals were concatenated without a
            # separator, producing "...registered as route_name='x'Check name...".
            raise RuntimeError(
                f"Cannot find URL because there is no resource registered as {route_name=}. "
                "Check name spelling or whether the router was not registered"
            ) from err

    return _url_for
def envelope_json_response(
    obj: Any, status_cls: type[HTTPException] = web.HTTPOk
) -> web.Response:
    """Wrap *obj* in an Envelope (error= for HTTPError subclasses, data= otherwise)
    and serialize it as a JSON response carrying status_cls's status code."""
    # NOTE: see https://github.com/ITISFoundation/osparc-simcore/issues/3646
    if issubclass(status_cls, HTTPError):
        enveloped = Envelope[Any](error=obj)
    else:
        enveloped = Envelope[Any](data=obj)

    return web.Response(
        text=json_dumps(enveloped.dict(**RESPONSE_MODEL_POLICY)),
        content_type=MIMETYPE_APPLICATION_JSON,
        status=status_cls.status_code,
    )
#
# Special models and responses for the front-end
#
PageStr: TypeAlias = Literal["view", "error"]
def create_redirect_to_page_response(
    app: web.Application, page: PageStr, **parameters
) -> web.HTTPFound:
    """
    Returns a redirect response to the front-end with information on page
    and parameters embedded in the fragment.

    For instance,
        https://osparc.io/#/error?message=Sorry%2C%20I%20could%20not%20find%20this%20&status_code=404
    results from
        - page=error
    and parameters
        - message="Sorry, I could not find this"
        - status_code=404

    Front-end can then render this data either in an error or a view page
    """
    log.debug("page: '%s' parameters: '%s'", page, parameters)
    assert page in ("view", "error")  # nosec

    # NOTE: uniform encoding in front-end using url fragments
    # SEE https://github.com/ITISFoundation/osparc-simcore/issues/1975
    # The page path + query string is carried entirely inside the URL fragment.
    fragment_path = f"{URL.build(path=f'/{page}').with_query(parameters)}"
    redirect_url = (
        app.router[INDEX_RESOURCE_NAME].url_for().with_fragment(fragment_path)
    )
    return web.HTTPFound(location=redirect_url)
PageParameters = TypeVar("PageParameters", bound=BaseModel)
class NextPage(GenericModel, Generic[PageParameters]):
    """
    This is the body of a 2XX response to pass the front-end
    what kind of page shall be display next and some information about it

    An analogous structure is used in the redirects (see create_redirect_response) but
    using a path+query in the fragment of the URL
    """

    # Code name of the front-end page to render next (ideally a PageStr value).
    name: str = Field(
        ..., description="Code name to the front-end page. Ideally a PageStr"
    )
    # Page-specific payload; None when the page needs no parameters.
    parameters: PageParameters | None = None
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "databricks workspace private-endpoint-connection show",
)
class Show(AAZCommand):
    """Get a private endpoint connection properties for a workspace
    """

    # NOTE: generated by aaz-dev-tools (see file header); prefer regenerating
    # over hand-editing this command class.
    _aaz_info = {
        "version": "2023-02-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.databricks/workspaces/{}/privateendpointconnections/{}", "2023-02-01"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Lazily build (and cache) the CLI argument schema for this command.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of the private endpoint connection",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.workspace_name = AAZStrArg(
            options=["--workspace-name"],
            help="The name of the workspace.",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                max_length=64,
                min_length=3,
            ),
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.PrivateEndpointConnectionsGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    # Single GET operation against the private endpoint connection resource.
    class PrivateEndpointConnectionsGet(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}",
                **self.url_parameters
            )

        @property
        def METHOD_NAME(self):
            return "GET"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "privateEndpointConnectionName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "workspaceName", self.ctx.args.workspace_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-02-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Lazily build (and cache) the response deserialization schema.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"required": True},
            )
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.group_ids = AAZListType(
                serialized_name="groupIds",
            )
            properties.private_endpoint = AAZObjectType(
                serialized_name="privateEndpoint",
            )
            properties.private_link_service_connection_state = AAZObjectType(
                serialized_name="privateLinkServiceConnectionState",
                flags={"required": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )

            group_ids = cls._schema_on_200.properties.group_ids
            group_ids.Element = AAZStrType()

            private_endpoint = cls._schema_on_200.properties.private_endpoint
            private_endpoint.id = AAZStrType(
                flags={"read_only": True},
            )

            private_link_service_connection_state = cls._schema_on_200.properties.private_link_service_connection_state
            private_link_service_connection_state.actions_required = AAZStrType(
                serialized_name="actionsRequired",
            )
            private_link_service_connection_state.description = AAZStrType()
            private_link_service_connection_state.status = AAZStrType(
                flags={"required": True},
            )

            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""


# Public API of this generated module.
__all__ = ["Show"]
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
import pytest
from esrally import config, exceptions
from esrally.mechanic import mechanic
class TestHostHandling:
    """Tests for mechanic's host-list parsing and grouping helpers."""

    @mock.patch("esrally.utils.net.resolve")
    def test_converts_valid_hosts(self, resolver):
        """host dicts become resolved (ip, port) tuples; port defaults to 9200."""
        resolver.side_effect = ["127.0.0.1", "10.16.23.5", "11.22.33.44"]
        hosts = [
            {"host": "127.0.0.1", "port": 9200},
            # also applies default port if none given
            {"host": "10.16.23.5"},
            {"host": "site.example.com", "port": 9200},
        ]
        assert mechanic.to_ip_port(hosts) == [
            ("127.0.0.1", 9200),
            ("10.16.23.5", 9200),
            ("11.22.33.44", 9200),
        ]

    @mock.patch("esrally.utils.net.resolve")
    def test_rejects_hosts_with_unexpected_properties(self, resolver):
        """Extra keys besides host/port (e.g. 'ssl') raise SystemSetupError."""
        resolver.side_effect = ["127.0.0.1", "10.16.23.5", "11.22.33.44"]
        hosts = [
            {"host": "127.0.0.1", "port": 9200, "ssl": True},
            {"host": "10.16.23.5", "port": 10200},
            {"host": "site.example.com", "port": 9200},
        ]
        with pytest.raises(exceptions.SystemSetupError) as exc:
            mechanic.to_ip_port(hosts)
        assert exc.value.args[0] == (
            "When specifying nodes to be managed by Rally you can only supply hostname:port pairs (e.g. 'localhost:9200'), "
            "any additional options cannot be supported."
        )

    def test_groups_nodes_by_host(self):
        """Duplicate (ip, port) pairs collapse to one key with ordered node ids."""
        ip_port = [
            ("127.0.0.1", 9200),
            ("127.0.0.1", 9200),
            ("127.0.0.1", 9200),
            ("10.16.23.5", 9200),
            ("11.22.33.44", 9200),
            ("11.22.33.44", 9200),
        ]

        assert mechanic.nodes_by_host(ip_port) == {
            ("127.0.0.1", 9200): [0, 1, 2],
            ("10.16.23.5", 9200): [3],
            ("11.22.33.44", 9200): [4, 5],
        }

    def test_extract_all_node_ips(self):
        """All distinct IPs are extracted, ports and duplicates dropped."""
        ip_port = [
            ("127.0.0.1", 9200),
            ("127.0.0.1", 9200),
            ("127.0.0.1", 9200),
            ("10.16.23.5", 9200),
            ("11.22.33.44", 9200),
            ("11.22.33.44", 9200),
        ]

        assert mechanic.extract_all_node_ips(ip_port) == {
            "127.0.0.1",
            "10.16.23.5",
            "11.22.33.44",
        }
class TestMechanic:
    """Engine start/stop lifecycle test using stubbed launcher and provisioners."""

    # Minimal stand-in for a launched node.
    class Node:
        def __init__(self, node_name):
            self.node_name = node_name

    # Launcher stub: records started/stopped state and fabricates node objects.
    class MockLauncher:
        def __init__(self):
            self.started = False

        def METHOD_NAME(self, node_configs):
            self.started = True
            return [TestMechanic.Node(f"rally-node-{n}") for n in range(len(node_configs))]

        def stop(self, nodes, metrics_store):
            self.started = False

    # We stub irrelevant methods for the test
    class MockMechanic(mechanic.Mechanic):
        def _current_race(self):
            return "race 17"

        def _add_results(self, current_race, node):
            pass

    @mock.patch("esrally.mechanic.provisioner.cleanup")
    def test_start_stop_nodes(self, cleanup):
        """start_engine prepares every provisioner and launches; stop_engine cleans up each."""
        def supplier():
            return "/home/user/src/elasticsearch/es.tar.gz"

        provisioners = [mock.Mock(), mock.Mock()]
        launcher = self.MockLauncher()

        cfg = config.Config()
        cfg.add(config.Scope.application, "system", "race.id", "17")
        cfg.add(config.Scope.application, "mechanic", "preserve.install", False)
        metrics_store = mock.Mock()
        m = self.MockMechanic(cfg, metrics_store, supplier, provisioners, launcher)
        m.start_engine()
        assert launcher.started
        for p in provisioners:
            assert p.prepare.called

        m.stop_engine()
        assert not launcher.started
        # One provisioner cleanup per node/provisioner.
        assert cleanup.call_count == 2
"""A matplotlib backend for publishing figures via display_data"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import matplotlib
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg # analysis: ignore
from matplotlib import colors
from matplotlib._pylab_helpers import Gcf
from IPython.core.getipython import get_ipython
from IPython.core.display import display
from .config import InlineBackend
def show(close=None, block=None):
    """Show all figures as SVG/PNG payloads sent to the IPython clients.

    Parameters
    ----------
    close : bool, optional
        If true, a ``plt.close('all')`` call is automatically issued after
        sending all the figures. If this is set, the figures will entirely
        removed from the internal list of figures.
    block : Not used.
        The `block` parameter is a Matplotlib experimental parameter.
        We accept it in the function signature for compatibility with other
        backends.
    """
    if close is None:
        # Fall back to the configured default when the caller does not decide.
        close = InlineBackend.instance().close_figures
    try:
        for figure_manager in Gcf.get_all_fig_managers():
            display(
                figure_manager.canvas.figure,
                metadata=_fetch_figure_metadata(figure_manager.canvas.figure)
            )
    finally:
        # Always reset the pending-draw queue, even if display() raised.
        show._to_draw = []
        # only call close('all') if any to close
        # close triggers gc.collect, which can be slow
        if close and Gcf.get_all_fig_managers():
            matplotlib.pyplot.close('all')
# Module-level state stored as attributes on `show` itself.
# This flag will be reset by draw_if_interactive when called
show._draw_called = False
# list of figures to draw when flush_figures is called
show._to_draw = []
def draw_if_interactive():
    """
    Is called after every pylab drawing command.

    Queues the active figure so a later flush_figures()/show() call sends it.
    """
    # signal that the current active figure should be sent at the end of
    # execution. Also sets the _draw_called flag, signaling that there will be
    # something to send. At the end of the code execution, a separate call to
    # flush_figures() will act upon these values
    manager = Gcf.get_active()
    if manager is None:
        return
    fig = manager.canvas.figure

    # Hack: matplotlib FigureManager objects in interacive backends (at least
    # in some of them) monkeypatch the figure object and add a .show() method
    # to it. This applies the same monkeypatch in order to support user code
    # that might expect `.show()` to be part of the official API of figure
    # objects.
    # For further reference:
    # https://github.com/ipython/ipython/issues/1612
    # https://github.com/matplotlib/matplotlib/issues/835
    if not hasattr(fig, 'show'):
        # Queue up `fig` for display
        fig.show = lambda *a: display(fig, metadata=_fetch_figure_metadata(fig))

    # If matplotlib was manually set to non-interactive mode, this function
    # should be a no-op (otherwise we'll generate duplicate plots, since a user
    # who set ioff() manually expects to make separate draw/show calls).
    if not matplotlib.is_interactive():
        return

    # ensure current figure will be drawn, and each subsequent call
    # of draw_if_interactive() moves the active figure to ensure it is
    # drawn last
    try:
        show._to_draw.remove(fig)
    except ValueError:
        # ensure it only appears in the draw list once
        pass
    # Queue up the figure for drawing in next show() call
    show._to_draw.append(fig)
    show._draw_called = True
def flush_figures():
    """Send all figures that changed

    This is meant to be called automatically and will call show() if, during
    prior code execution, there had been any calls to draw_if_interactive.

    This function is meant to be used as a post_execute callback in IPython,
    so user-caused errors are handled with showtraceback() instead of being
    allowed to raise. If this function is not called from within IPython,
    then these exceptions will raise.
    """
    if not show._draw_called:
        return

    if InlineBackend.instance().close_figures:
        # ignore the tracking, just draw and close all figures
        try:
            return show(True)
        except Exception as e:
            # safely show traceback if in IPython, else raise
            ip = get_ipython()
            if ip is None:
                raise e
            else:
                ip.showtraceback()
                return
    try:
        # exclude any figures that were closed:
        active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])
        for fig in [ fig for fig in show._to_draw if fig in active ]:
            try:
                display(fig, metadata=_fetch_figure_metadata(fig))
            except Exception as e:
                # safely show traceback if in IPython, else raise
                ip = get_ipython()
                if ip is None:
                    raise e
                else:
                    ip.showtraceback()
                    return
    finally:
        # clear flags for next round
        show._to_draw = []
        show._draw_called = False
# Changes to matplotlib in version 1.2 requires a mpl backend to supply a default
# figurecanvas. This is set here to a Agg canvas, which renders off-screen.
# See https://github.com/matplotlib/matplotlib/pull/1125
FigureCanvas = FigureCanvasAgg
def _enable_matplotlib_integration():
    """Enable extra IPython matplotlib integration when we are loaded as the matplotlib backend."""
    from matplotlib import get_backend
    ip = get_ipython()
    backend = get_backend()
    # Only act when running inside IPython AND this module is the active backend.
    if ip and backend == 'module://%s' % __name__:
        from IPython.core.pylabtools import configure_inline_support, activate_matplotlib
        try:
            activate_matplotlib(backend)
            configure_inline_support(ip, backend)
        except (ImportError, AttributeError):
            # bugs may cause a circular import on Python 2;
            # retry once after the current cell finishes, then unregister.
            def configure_once(*args):
                activate_matplotlib(backend)
                configure_inline_support(ip, backend)
                ip.events.unregister('post_run_cell', configure_once)
            ip.events.register('post_run_cell', configure_once)
_enable_matplotlib_integration()
def _fetch_figure_metadata(fig):
    """Get some metadata to help with displaying a figure.

    Returns ``{'needs_background': 'dark'}`` or ``{'needs_background':
    'light'}`` when the figure background is transparent and all tick labels
    share the same lightness; otherwise returns None.
    """
    # determine if a background is needed for legibility
    if METHOD_NAME(fig.get_facecolor()):
        # the background is transparent; inspect lightness of every tick
        # label on every axis of every axes
        ticksLight = _is_light([label.get_color()
                                for axes in fig.axes
                                for axis in (axes.xaxis, axes.yaxis)
                                for label in axis.get_ticklabels()])
        if ticksLight.size and (ticksLight == ticksLight[0]).all():
            # there are one or more tick labels, all with the same lightness
            return {'needs_background': 'dark' if ticksLight[0] else 'light'}
    return None
def _is_light(color):
    """Return whether a color (or each color in a sequence) is light.

    Lightness is judged with the ITU BT.601 luminance formula
    (see https://stackoverflow.com/a/596241): a weighted sum of the RGB
    channels greater than 0.5 counts as light.
    """
    rgba = colors.to_rgba_array(color)
    luminance = rgba[:, :3] @ (.299, .587, .114)
    return luminance > .5
def METHOD_NAME(color):
    """Determine transparency from alpha: True if alpha is below 0.5."""
    alpha = colors.to_rgba(color)[3]
    return alpha < .5
6,351 | do get | import os
import sys
import webbrowser
from base64 import b64encode
from datetime import datetime
from http.server import BaseHTTPRequestHandler, HTTPServer
from jinja2 import Environment, FileSystemLoader
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer
from pylint.interfaces import IReporter
from pylint.reporters.ureports.nodes import BaseLayout
from .core import PythonTaReporter
TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
class HTMLReporter(PythonTaReporter):
    """Reporter that displays results in HTML form.

    By default, automatically opens the report in a web browser.
    """

    name = "HTMLReporter"

    # Maps semantic colour classes to their opening HTML tags; each span is
    # closed by the "reset" entry.
    _COLOURING = {
        "black": '<span class="black">',
        "black-line": '<span class="black line-num">',
        "bold": "<span>",
        "code-heading": "<span>",
        "style-heading": "<span>",
        "code-name": "<span>",
        "style-name": "<span>",
        "highlight": '<span class="highlight-pyta">',
        "grey": '<span class="grey">',
        "grey-line": '<span class="grey line-num">',
        "gbold": '<span class="gbold">',
        "gbold-line": '<span class="gbold line-num">',
        "reset": "</span>",
    }
    # Number of spaces rendered before each line number in snippets.
    _PRE_LINE_NUM_SPACES = 0

    # User-facing strings consumed by the report template.
    no_err_message = "No problems detected, good job!"
    no_snippet = "No code to display for this message."
    code_err_title = "Code Errors or Forbidden Usage (fix: high priority)"
    style_err_title = "Style or Convention Errors (fix: before submission)"

    OUTPUT_FILENAME = "pyta_report.html"

    def print_messages(self, level="all"):
        """Do nothing to print messages, since all are displayed in a single HTML file."""

    def display_messages(self, layout: BaseLayout) -> None:
        """Hook for displaying the messages of the reporter

        This will be called whenever the underlying messages
        needs to be displayed. For some reporters, it probably
        doesn't make sense to display messages as soon as they
        are available, so some mechanism of storing them could be used.
        This method can be implemented to display them after they've
        been aggregated.
        """
        grouped_messages = {path: self.group_messages(msgs) for path, msgs in self.messages.items()}
        template_f = self.linter.config.pyta_template_file
        template = Environment(loader=FileSystemLoader(TEMPLATES_DIR)).get_template(template_f)

        # Embed resources so the output html can go anywhere, independent of assets.
        # with open(os.path.join(TEMPLATES_DIR, 'pyta_logo_markdown.png'), 'rb+') as image_file:
        #     # Encode img binary to base64 (+33% size), decode to remove the "b'"
        #     pyta_logo_base64_encoded = b64encode(image_file.read()).decode()

        # Date/time (24 hour time) format:
        # Generated: ShortDay. ShortMonth. PaddedDay LongYear, Hour:Min:Sec
        dt = str(datetime.now().strftime("%a. %b. %d %Y, %I:%M:%S %p"))

        # Render the jinja template
        rendered_template = template.render(
            date_time=dt,
            reporter=self,
            grouped_messages=grouped_messages,
            os=os,
            enumerate=enumerate,
        )

        # If a filepath was specified, write to the file
        if self.out is not sys.stdout:
            self.writeln(rendered_template)
        else:
            # otherwise serve the encoded report to the default web browser
            rendered_template = rendered_template.encode("utf8")
            self._open_html_in_browser(rendered_template)

    def _open_html_in_browser(self, html: bytes) -> None:
        """
        Display html in a web browser without creating a temp file.

        Instantiates a trivial http server and uses the webbrowser module to
        open a URL to retrieve html from that server.

        Adapted from: https://github.com/plotly/plotly.py/blob/master/packages/python/plotly/plotly/io/_base_renderers.py#L655
        """

        class OneShotRequestHandler(BaseHTTPRequestHandler):
            # Handler that serves the report once, in 1 MiB chunks.
            def METHOD_NAME(self):
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()

                buffer_size = 1024 * 1024
                for i in range(0, len(html), buffer_size):
                    self.wfile.write(html[i : i + buffer_size])

            def log_message(self, format, *args):
                """Overridden so that no server logging is printed."""
                pass

        # Port 0 makes the OS pick a free ephemeral port.
        server = HTTPServer(("127.0.0.1", 0), OneShotRequestHandler)
        webbrowser.open(f"http://127.0.0.1:{server.server_port}", new=2)

        # Serve exactly one request, then shut down.
        server.handle_request()
        server.server_close()

        print(
            "[INFO] Your PythonTA report is being opened in your web browser.\n"
            " If it doesn't open, please add an output argument to python_ta.check_all\n"
            " as follows:\n\n"
            " check_all(..., output='pyta_report.html')\n\n"
            " This will cause PythonTA to save the report to a file, pyta_report.html,\n"
            " that you can open manually in a web browser.",
            file=sys.stderr,
        )

    @classmethod
    def _colourify(cls, colour_class: str, text: str) -> str:
        """Return a colourized version of text, using colour_class."""
        colour = cls._COLOURING[colour_class]
        new_text = text.replace(" ", cls._SPACE)
        if "-line" not in colour_class:
            # Code text is syntax-highlighted via pygments; line-number
            # spans ("-line" classes) are left as plain text.
            new_text = highlight(
                new_text,
                PythonLexer(),
                HtmlFormatter(nowrap=True, lineseparator="", classprefix="pygments-"),
            )

        return colour + new_text + cls._COLOURING["reset"]
6,352 | gettz | # -*- coding: utf-8 -*-
import warnings
import json
from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO
from dateutil.tz import tzfile as _tzfile
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
METADATA_FN = 'METADATA'
class tzfile(_tzfile):
    """A :class:`dateutil.tz.tzfile` that pickles via the module-level loader."""

    def __reduce__(self):
        # On unpickling, re-load the zone by name instead of serializing
        # the parsed transition data.
        return (METHOD_NAME, (self._filename,))
def getzoneinfofile_stream():
    """Return the bundled zoneinfo tarball as a BytesIO, or None on I/O error."""
    try:
        data = get_data(__name__, ZONEFILENAME)
    except IOError as e:  # TODO switch to FileNotFoundError?
        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
        return None
    return BytesIO(data)
class ZoneInfoFile(object):
    """In-memory view of a tarball of compiled time zone files.

    Exposes ``zones`` (a mapping from IANA zone name to :class:`tzfile`)
    and ``metadata`` (the parsed METADATA member, or None if absent).
    """

    def __init__(self, zonefile_stream=None):
        # zonefile_stream: file-like object containing the tarball;
        # None produces an empty instance.
        if zonefile_stream is not None:
            with TarFile.open(fileobj=zonefile_stream) as tf:
                self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
                              for zf in tf.getmembers()
                              if zf.isfile() and zf.name != METADATA_FN}
                # deal with links: They'll point to their parent object. Less
                # waste of memory
                links = {zl.name: self.zones[zl.linkname]
                         for zl in tf.getmembers() if
                         zl.islnk() or zl.issym()}
                self.zones.update(links)
                try:
                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
                    metadata_str = metadata_json.read().decode('UTF-8')
                    self.metadata = json.loads(metadata_str)
                except KeyError:
                    # no metadata in tar file
                    self.metadata = None
        else:
            self.zones = {}
            self.metadata = None

    def get(self, name, default=None):
        """
        Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
        for retrieving zones from the zone dictionary.

        :param name:
            The name of the zone to retrieve. (Generally IANA zone names)

        :param default:
            The value to return in the event of a missing key.

        .. versionadded:: 2.6.0
        """
        return self.zones.get(name, default)
# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the api.
#
# TODO: Remove after deprecation period.
_CLASS_ZONE_INSTANCE = []
def get_zonefile_instance(new_instance=False):
    """
    Convenience function providing a :class:`ZoneInfoFile` instance built
    from the data shipped with the ``dateutil`` package.

    A single instance is cached on the function object itself and reused by
    subsequent calls.

    :param new_instance:
        If ``True``, a fresh :class:`ZoneInfoFile` is instantiated and
        becomes the cached instance for the next call. Otherwise, new
        instances are created only as necessary.

    :return:
        Returns a :class:`ZoneInfoFile` object.

    .. versionadded:: 2.6
    """
    cached = None if new_instance else getattr(
        get_zonefile_instance, '_cached_instance', None)
    if cached is not None:
        return cached
    instance = ZoneInfoFile(getzoneinfofile_stream())
    get_zonefile_instance._cached_instance = instance
    return instance
def METHOD_NAME(name):
    """
    This retrieves a time zone from the local zoneinfo tarball that is packaged
    with dateutil.

    :param name:
        An IANA-style time zone name, as found in the zoneinfo file.

    :return:
        Returns a :class:`dateutil.tz.tzfile` time zone object, or None if
        the name is not present in the bundled database.

    .. warning::
        It is generally inadvisable to use this function, and it is only
        provided for API compatibility with earlier versions. This is *not*
        equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
        time zone based on the inputs, favoring system zoneinfo. This is ONLY
        for accessing the dateutil-specific zoneinfo (which may be out of
        date compared to the system zoneinfo).

    .. deprecated:: 2.6
        If you need to use a specific zoneinfofile over the system zoneinfo,
        instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
        :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.

        Use :func:`get_zonefile_instance` to retrieve an instance of the
        dateutil-provided zoneinfo.
    """
    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
                  "to use the dateutil-provided zoneinfo files, instantiate a "
                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Lazily build the module-level cache on first use (see the workaround
    # note above _CLASS_ZONE_INSTANCE).
    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
def gettz_db_metadata():
    """ Get the zonefile metadata

    See `zonefile_metadata`_

    :returns:
        A dictionary with the database metadata, or None if the bundled
        tarball carries no METADATA member.

    .. deprecated:: 2.6
        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
    """
    # NOTE(review): this warning text appears to be missing "instantiate a"
    # before "ZoneInfoFile object" -- confirm against upstream wording
    # before changing the user-visible string.
    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
                  "versions, to use the dateutil-provided zoneinfo files, "
                  "ZoneInfoFile object and query the 'metadata' attribute "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Lazily build the module-level cache on first use (see the workaround
    # note above _CLASS_ZONE_INSTANCE).
    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].metadata
6,353 | test proxy from env http without port | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the http_client module."""
# pytype: skip-file
import os
import unittest
import mock
from httplib2 import ProxyInfo
from apache_beam.internal.http_client import DEFAULT_HTTP_TIMEOUT_SECONDS
from apache_beam.internal.http_client import get_new_http
from apache_beam.internal.http_client import proxy_info_from_environment_var
class HttpClientTest(unittest.TestCase):
    """Tests for proxy_info_from_environment_var and get_new_http.

    ProxyInfo results are compared via str() throughout -- presumably
    because ProxyInfo does not define value equality; confirm before
    tightening these assertions.
    """

    def test_proxy_from_env_http_with_port(self):
        with mock.patch.dict(os.environ, http_proxy='http://localhost:9000'):
            proxy_info = proxy_info_from_environment_var('http_proxy')
            expected = ProxyInfo(3, 'localhost', 9000)
            self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_https_with_port(self):
        with mock.patch.dict(os.environ, https_proxy='https://localhost:9000'):
            proxy_info = proxy_info_from_environment_var('https_proxy')
            expected = ProxyInfo(3, 'localhost', 9000)
            self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_http_without_port(self):
        # Port defaults to 80 for http proxies.
        with mock.patch.dict(os.environ, http_proxy='http://localhost'):
            proxy_info = proxy_info_from_environment_var('http_proxy')
            expected = ProxyInfo(3, 'localhost', 80)
            self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_https_without_port(self):
        # Port defaults to 443 for https proxies.
        with mock.patch.dict(os.environ, https_proxy='https://localhost'):
            proxy_info = proxy_info_from_environment_var('https_proxy')
            expected = ProxyInfo(3, 'localhost', 443)
            self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_http_without_method(self):
        # Scheme-less proxy URLs are accepted.
        with mock.patch.dict(os.environ, http_proxy='localhost:8000'):
            proxy_info = proxy_info_from_environment_var('http_proxy')
            expected = ProxyInfo(3, 'localhost', 8000)
            self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_https_without_method(self):
        with mock.patch.dict(os.environ, https_proxy='localhost:8000'):
            proxy_info = proxy_info_from_environment_var('https_proxy')
            expected = ProxyInfo(3, 'localhost', 8000)
            self.assertEqual(str(expected), str(proxy_info))

    def METHOD_NAME(self):
        with mock.patch.dict(os.environ, http_proxy='localhost'):
            proxy_info = proxy_info_from_environment_var('http_proxy')
            expected = ProxyInfo(3, 'localhost', 80)
            self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_https_without_port_without_method(self):
        with mock.patch.dict(os.environ, https_proxy='localhost'):
            proxy_info = proxy_info_from_environment_var('https_proxy')
            expected = ProxyInfo(3, 'localhost', 443)
            self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_invalid_var(self):
        # Unsupported variable names yield no proxy info.
        proxy_info = proxy_info_from_environment_var('http_proxy_host')
        expected = None
        self.assertEqual(str(expected), str(proxy_info))

    def test_proxy_from_env_wrong_method_in_var_name(self):
        with mock.patch.dict(os.environ, smtp_proxy='localhost'):
            with self.assertRaises(KeyError):
                proxy_info_from_environment_var('smtp_proxy')

    def test_proxy_from_env_wrong_method_in_url(self):
        # Documents current (arguably buggy) behavior: the scheme is
        # mistaken for the host.
        with mock.patch.dict(os.environ, http_proxy='smtp://localhost:8000'):
            proxy_info = proxy_info_from_environment_var('http_proxy')
            expected = ProxyInfo(3, 'smtp', 80)  # wrong proxy info generated
            self.assertEqual(str(expected), str(proxy_info))

    def test_get_new_http_proxy_info(self):
        with mock.patch.dict(os.environ, http_proxy='localhost'):
            http = get_new_http()
            expected = ProxyInfo(3, 'localhost', 80)
            self.assertEqual(str(http.proxy_info), str(expected))

    def test_get_new_http_timeout(self):
        http = get_new_http()
        self.assertEqual(http.timeout, DEFAULT_HTTP_TIMEOUT_SECONDS)
if __name__ == '__main__':
unittest.main() |
6,354 | test create model tiny conv training | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for speech commands models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.speech_commands import models
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ModelsTest(test.TestCase):
    """Unit tests for the speech-commands model construction helpers."""

    def _modelSettings(self):
        # Shared settings used by all model-construction tests.
        return models.prepare_model_settings(
            label_count=10,
            sample_rate=16000,
            clip_duration_ms=1000,
            window_size_ms=20,
            window_stride_ms=10,
            feature_bin_count=40,
            preprocess="mfcc")

    def testPrepareModelSettings(self):
        self.assertIsNotNone(
            models.prepare_model_settings(
                label_count=10,
                sample_rate=16000,
                clip_duration_ms=1000,
                window_size_ms=20,
                window_stride_ms=10,
                feature_bin_count=40,
                preprocess="mfcc"))

    @test_util.run_deprecated_v1
    def testCreateModelConvTraining(self):
        model_settings = self._modelSettings()
        with self.cached_session() as sess:
            fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
            # Training mode (True) returns logits plus a dropout placeholder.
            logits, dropout_prob = models.create_model(fingerprint_input,
                                                       model_settings, "conv", True)
            self.assertIsNotNone(logits)
            self.assertIsNotNone(dropout_prob)
            self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
            self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))

    @test_util.run_deprecated_v1
    def testCreateModelConvInference(self):
        model_settings = self._modelSettings()
        with self.cached_session() as sess:
            fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
            # Inference mode (False) returns logits only.
            logits = models.create_model(fingerprint_input, model_settings, "conv",
                                         False)
            self.assertIsNotNone(logits)
            self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))

    @test_util.run_deprecated_v1
    def testCreateModelLowLatencyConvTraining(self):
        model_settings = self._modelSettings()
        with self.cached_session() as sess:
            fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
            logits, dropout_prob = models.create_model(
                fingerprint_input, model_settings, "low_latency_conv", True)
            self.assertIsNotNone(logits)
            self.assertIsNotNone(dropout_prob)
            self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
            self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))

    @test_util.run_deprecated_v1
    def testCreateModelFullyConnectedTraining(self):
        model_settings = self._modelSettings()
        with self.cached_session() as sess:
            fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
            logits, dropout_prob = models.create_model(
                fingerprint_input, model_settings, "single_fc", True)
            self.assertIsNotNone(logits)
            self.assertIsNotNone(dropout_prob)
            self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
            self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))

    def testCreateModelBadArchitecture(self):
        model_settings = self._modelSettings()
        with self.cached_session():
            fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
            # Unknown architecture names must raise with a clear message.
            with self.assertRaises(Exception) as e:
                models.create_model(fingerprint_input, model_settings,
                                    "bad_architecture", True)
            self.assertTrue("not recognized" in str(e.exception))

    @test_util.run_deprecated_v1
    def METHOD_NAME(self):
        model_settings = self._modelSettings()
        with self.cached_session() as sess:
            fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
            logits, dropout_prob = models.create_model(
                fingerprint_input, model_settings, "tiny_conv", True)
            self.assertIsNotNone(logits)
            self.assertIsNotNone(dropout_prob)
            self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
            self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
if __name__ == "__main__":
test.main() |
6,355 | test merge cube attributes 1 cube | """Tests for :mod:`esmvalcore.iris_helpers`."""
import datetime
from copy import deepcopy
from itertools import permutations
from unittest import mock
import numpy as np
import pytest
from cf_units import Unit
from iris.coords import (
AncillaryVariable,
AuxCoord,
CellMeasure,
CellMethod,
DimCoord,
)
from iris.cube import Cube, CubeList
from iris.exceptions import CoordinateMultiDimError
from esmvalcore.iris_helpers import (
add_leading_dim_to_cube,
date2num,
merge_cube_attributes,
)
@pytest.fixture
def cubes():
    """Test cubes."""
    return CubeList([
        Cube(0.0, var_name='a', long_name='a'),
        Cube(0.0, var_name='a', long_name='b'),
        Cube(0.0, var_name='c', long_name='d'),
    ])
@pytest.fixture
def units():
    """Time units on a proleptic Gregorian calendar."""
    calendar = 'proleptic_gregorian'
    return Unit('days since 0001-01-01', calendar=calendar)
def test_add_leading_dim_to_cube():
    """Test :func:`esmvalcore.iris_helpers.add_leading_dim_to_cube`."""
    # The new leading dimension coordinate.
    lat_coord = DimCoord(
        [0.0, 1.0, 2.0],
        var_name='lat',
        standard_name='latitude',
        long_name='latitude',
        units='degrees_north',
    )
    lon_coord = DimCoord(
        [0.0, 1.0],
        var_name='lon',
        standard_name='longitude',
        long_name='longitude',
        units='degrees_east',
    )
    height_coord = AuxCoord(
        [2.0, 1.0],
        var_name='height',
        standard_name='height',
        long_name='height',
        units='m',
        attributes={'positive': 'up'},
    )
    land_mask = AncillaryVariable(
        [0.5, 0.2],
        var_name='sftlf',
        standard_name=None,
        long_name='Land fraction',
        units='1',
    )
    cell_area = CellMeasure(
        [1.0, 2.0],
        var_name='areacella',
        standard_name='cell_area',
        long_name='Cell Area',
        units='m2',
        measure='area',
    )
    # 1D input cube with every kind of attached metadata.
    cube = Cube(
        [1, 42],
        var_name='ta',
        standard_name='air_temperature',
        long_name='Air Temperature',
        units='K',
        attributes={'model_name': 'ESM'},
        cell_methods=[CellMethod('mean', coords='time')],
        aux_coords_and_dims=[(height_coord, 0)],
        dim_coords_and_dims=[(lon_coord, 0)],
        ancillary_variables_and_dims=[(land_mask, 0)],
        cell_measures_and_dims=[(cell_area, 0)],
    )

    new_cube = add_leading_dim_to_cube(cube, lat_coord)

    # Data must be broadcast along the new leading dimension.
    np.testing.assert_equal(new_cube.data, [[1, 42], [1, 42], [1, 42]])

    # Scalar metadata must be preserved unchanged.
    assert new_cube.var_name == 'ta'
    assert new_cube.standard_name == 'air_temperature'
    assert new_cube.long_name == 'Air Temperature'
    assert new_cube.units == 'K'
    assert new_cube.cell_methods == (CellMethod('mean', coords='time'),)
    assert new_cube.attributes == {'model_name': 'ESM'}

    # All coordinates must be present, with original ones shifted to dim 1.
    assert new_cube.coords(lat_coord, dim_coords=True)
    assert new_cube.coords(lon_coord, dim_coords=True)
    assert new_cube.coords(height_coord, dim_coords=False)
    assert new_cube.coord_dims(lat_coord) == (0,)
    assert new_cube.coord_dims(lon_coord) == (1,)
    assert new_cube.coord_dims(height_coord) == (1,)

    # Ancillary variables and cell measures likewise move to dim 1.
    assert new_cube.ancillary_variables(land_mask)
    assert new_cube.cell_measures(cell_area)
    assert new_cube.ancillary_variable_dims(land_mask) == (1,)
    assert new_cube.cell_measure_dims(cell_area) == (1,)
def test_add_leading_dim_to_cube_non_1d():
    """Test :func:`esmvalcore.iris_helpers.add_leading_dim_to_cube`."""
    # A multi-dimensional coordinate cannot become the leading dimension.
    two_dim_coord = AuxCoord([[0, 1], [2, 3]], var_name='coord_2d')
    expected_msg = "Multi-dimensional coordinate not supported: 'coord_2d'"
    with pytest.raises(CoordinateMultiDimError, match=expected_msg):
        add_leading_dim_to_cube(mock.sentinel.cube, two_dim_coord)
@pytest.mark.parametrize("date, dtype, expected", [
    (datetime.datetime(1, 1, 1), np.float64, 0.0),
    (datetime.datetime(1, 1, 1), int, 0.0),
    (datetime.datetime(1, 1, 2, 12), np.float64, 1.5),
])
def test_date2num_scalar(date, dtype, expected, units):
    """date2num converts a scalar datetime to a number of the given dtype."""
    num = date2num(date, units, dtype=dtype)
    assert num == expected
    # The result must carry exactly the requested dtype.
    assert num.dtype == dtype
def assert_attribues_equal(attrs_1: dict, attrs_2: dict) -> None:
    """Assert that two attribute dicts hold the same keys and values.

    Values are compared with :func:`numpy.testing.assert_array_equal` so
    that array-valued attributes compare element-wise.
    """
    assert len(attrs_1) == len(attrs_2)
    for key in attrs_1:
        assert key in attrs_2
        np.testing.assert_array_equal(attrs_2[key], attrs_1[key])
def make_cube_with_attrs(index):
    """Make cube that contains different types of attributes.

    The attribute set mixes values that are identical across cubes, values
    that differ per *index*, and keys that only exist for some indices, to
    exercise every branch of attribute merging.
    """
    attributes = {
        # Identical attribute values across cubes
        'int': 42,
        'float': 3.1415,
        'bool': True,
        'str': 'Hello, world',
        'list': [1, 1, 2, 3, 5, 8, 13],
        'tuple': (1, 2, 3, 4, 5),
        'nparray': np.arange(42),

        # Differing attribute values across cubes
        'diff_int': index,
        'diff_str': 'abc'[index],
        'diff_nparray': np.arange(index),
        'mix': np.arange(3) if index == 0 else index,
        'diff_list': [index, index],
        'diff_tuple': (index, index),

        # Differing attribute keys across cubes
        str(index + 1000): index,
        str(index % 2 + 100): index,
        str(index % 2): index % 2,
    }
    return Cube(0.0, attributes=attributes)
CUBES = [make_cube_with_attrs(i) for i in range(3)]
# Test all permutations of CUBES to test that results do not depend on order
@pytest.mark.parametrize("cubes", list(permutations(CUBES)))
def test_merge_cube_attributes(cubes):
    """Test `merge_cube_attributes`."""
    # Differing values are merged into space-separated strings; identical
    # values and unique keys pass through unchanged.
    expected_attributes = {
        'int': 42,
        'float': 3.1415,
        'bool': True,
        'str': 'Hello, world',
        'list': [1, 1, 2, 3, 5, 8, 13],
        'tuple': (1, 2, 3, 4, 5),
        'nparray': np.arange(42),
        'diff_int': '0 1 2',
        'diff_str': 'a b c',
        'diff_nparray': '[0 1] [0] []',
        'mix': '1 2 [0 1 2]',
        'diff_list': '[0, 0] [1, 1] [2, 2]',
        'diff_tuple': '(0, 0) (1, 1) (2, 2)',
        '1000': 0,
        '1001': 1,
        '1002': 2,
        '100': '0 2',
        '101': 1,
        '0': 0,
        '1': 1,
    }
    cubes = deepcopy(cubes)
    merge_cube_attributes(cubes)
    assert len(cubes) == 3
    # After merging, every cube carries the same, merged attribute dict.
    for cube in cubes:
        assert_attribues_equal(cube.attributes, expected_attributes)
def test_merge_cube_attributes_0_cubes():
    """Test `merge_cube_attributes` with 0 cubes."""
    # Must be a no-op and not raise.
    merge_cube_attributes([])
def METHOD_NAME():
    """Test `merge_cube_attributes` with 1 cube."""
    cubes = CubeList([deepcopy(CUBES[0])])
    expected_attributes = deepcopy(cubes[0].attributes)
    merge_cube_attributes(cubes)
    assert len(cubes) == 1
    # A single cube's attributes must pass through completely unchanged.
    assert_attribues_equal(cubes[0].attributes, expected_attributes)
6,356 | build arguments schema | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "networkfabric ipprefix show",
)
class Show(AAZCommand):
    """Show details of the provided Ip Prefix resource

    :example: Show the Ip Prefix
        az networkfabric ipprefix show --resource-group "example-rg" --resource-name "example-ipprefix"
    """

    # NOTE: this file is generated by aaz-dev-tools (see file header);
    # prefer regenerating over hand-editing.
    # Maps the API version to the ARM resource paths served by this command.
    _aaz_info = {
        "version": "2023-06-15",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.managednetworkfabric/ipprefixes/{}", "2023-06-15"],
        ]
    }

    def _handler(self, command_args):
        # Parse args, run the GET operation, then shape the output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def METHOD_NAME(cls, *args, **kwargs):
        # Build (and cache on the class) the argument schema.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super().METHOD_NAME(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_name = AAZStrArg(
            options=["--resource-name"],
            help="Name of the IP Prefix.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            help="Name of the resource group",
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.IpPrefixesGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Hook invoked before the HTTP operation; intended for overrides.
        pass

    @register_callback
    def post_operations(self):
        # Hook invoked after the HTTP operation; intended for overrides.
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class IpPrefixesGet(AAZHttpOperation):
        """HTTP GET against the ipPrefixes ARM resource."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/ipPrefixes/{ipPrefixName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "ipPrefixName", self.ctx.args.resource_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-06-15",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the 200 response into the context variable that
            # _output later reads back.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Build (and cache on the class) the deserialization schema for
            # 200 responses.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.location = AAZStrType(
                flags={"required": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"required": True, "client_flatten": True},
            )
            _schema_on_200.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.administrative_state = AAZStrType(
                serialized_name="administrativeState",
                flags={"read_only": True},
            )
            properties.annotation = AAZStrType()
            properties.configuration_state = AAZStrType(
                serialized_name="configurationState",
                flags={"read_only": True},
            )
            properties.ip_prefix_rules = AAZListType(
                serialized_name="ipPrefixRules",
                flags={"required": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )

            ip_prefix_rules = cls._schema_on_200.properties.ip_prefix_rules
            ip_prefix_rules.Element = AAZObjectType()

            _element = cls._schema_on_200.properties.ip_prefix_rules.Element
            _element.action = AAZStrType(
                flags={"required": True},
            )
            _element.condition = AAZStrType()
            _element.network_prefix = AAZStrType(
                serialized_name="networkPrefix",
                flags={"required": True},
            )
            _element.sequence_number = AAZIntType(
                serialized_name="sequenceNumber",
                flags={"required": True},
            )
            _element.subnet_mask_length = AAZStrType(
                serialized_name="subnetMaskLength",
            )

            system_data = cls._schema_on_200.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
            )

            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()

            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""
    # Intentionally empty placeholder emitted by the code generator.


__all__ = ["Show"]
6,357 | test module hash | import datetime as dt
import io
import pathlib
import time
import numpy as np
import pandas as pd
import param
import pytest
try:
import diskcache
except Exception:
diskcache = None
diskcache_available = pytest.mark.skipif(diskcache is None, reason="requires diskcache")
from panel.io.cache import _find_hash_func, cache
from panel.io.state import set_curdoc
################
# Test hashing #
################
def hashes_equal(v1, v2):
    """Return True if panel's hashing machinery hashes *v1* and *v2* alike."""
    hash_1 = _find_hash_func(v1)(v1)
    hash_2 = _find_hash_func(v2)(v2)
    return hash_1 == hash_2
def test_str_hash():
    """Equal strings hash alike; different strings do not."""
    assert hashes_equal('foo', 'foo')
    assert not hashes_equal('foo', 'bar')
def test_int_hash():
    """Equal ints hash alike; different ints do not."""
    assert hashes_equal(12, 12)
    assert not hashes_equal(1, 2)
def test_float_hash():
    """Equal floats hash alike; different floats do not."""
    assert hashes_equal(3.14, 3.14)
    assert not hashes_equal(1.2, 3.14)
def test_bool_hash():
    """Booleans hash by value; True and False differ."""
    assert hashes_equal(True, True)
    assert hashes_equal(False, False)
    assert not hashes_equal(True, False)
def test_none_hash():
    """None hashes consistently and is distinct from False."""
    assert hashes_equal(None, None)
    assert not hashes_equal(None, False)
def test_bytes_hash():
assert hashes_equal(b'0', b'0')
assert not hashes_equal(b'0', b'1')
def test_date_hash():
assert hashes_equal(dt.date(1988, 4, 14), dt.date(1988, 4, 14))
assert not hashes_equal(dt.date(1988, 4, 14), dt.date(1988, 4, 15))
def test_datetime_hash():
assert hashes_equal(dt.datetime(1988, 4, 14, 12, 3, 2, 1), dt.datetime(1988, 4, 14, 12, 3, 2, 1))
assert not hashes_equal(dt.datetime(1988, 4, 14, 12, 3, 2, 1), dt.datetime(1988, 4, 14, 12, 3, 2, 2))
def test_list_hash():
assert hashes_equal([0], [0])
assert hashes_equal(['a', ['b']], ['a', ['b']])
assert not hashes_equal([0], [1])
assert not hashes_equal(['a', ['b']], ['a', ['c']])
# Recursion
l = [0]
l.append(l)
assert hashes_equal(l, list(l))
def test_tuple_hash():
assert hashes_equal((0,), (0,))
assert hashes_equal(('a', ('b',)), ('a', ('b',)))
assert not hashes_equal((0,), (1,))
assert not hashes_equal(('a', ('b',)), ('a', ('c',)))
def test_dict_hash():
assert hashes_equal({'a': 0}, {'a': 0})
assert hashes_equal({'a': {'b': 0}}, {'a': {'b': 0}})
assert not hashes_equal({'a': 0}, {'b': 0})
assert not hashes_equal({'a': 0}, {'a': 1})
assert not hashes_equal({'a': {'b': 0}}, {'a': {'b': 1}})
# Recursion
d = {'a': {}}
d['a'] = d
assert hashes_equal(d, dict(d))
# Hash checks for file-like objects and filesystem paths.  The buffers are
# rewound before hashing; presumably the hash function reads their contents
# (TODO confirm it also restores the stream position).

def test_stringio_hash():
    sio1, sio2 = io.StringIO(), io.StringIO()
    sio1.write('foo')
    sio2.write('foo')
    sio1.seek(0)
    sio2.seek(0)
    assert hashes_equal(sio1, sio2)
    sio3 = io.StringIO()
    sio3.write('bar')
    sio3.seek(0)
    assert not hashes_equal(sio1, sio3)


def test_bytesio_hash():
    bio1, bio2 = io.BytesIO(), io.BytesIO()
    bio1.write(b'foo')
    bio2.write(b'foo')
    bio1.seek(0)
    bio2.seek(0)
    assert hashes_equal(bio1, bio2)
    bio3 = io.BytesIO()
    bio3.write(b'bar')
    bio3.seek(0)
    assert not hashes_equal(bio1, bio3)


def test_pathlib_hash():
    # Paths compare by their textual value, not by resolved location.
    assert hashes_equal(pathlib.Path('./'), pathlib.Path('./'))
    assert not hashes_equal(pathlib.Path('./'), pathlib.Path('../'))
# Hash checks for numpy/pandas containers and callables.

def test_ndarray_hash():
    assert hashes_equal(np.array([0, 1, 2]), np.array([0, 1, 2]))
    # Same values but different dtype must hash differently.
    assert not hashes_equal(
        np.array([0, 1, 2], dtype='uint32'),
        np.array([0, 1, 2], dtype='float64')
    )
    assert not hashes_equal(
        np.array([0, 1, 2]),
        np.array([2, 1, 0])
    )


def test_dataframe_hash():
    # NOTE(review): pd._testing.makeMixedDataFrame is a private helper that was
    # removed in pandas 2.x — confirm the pinned pandas version supports it.
    df1, df2 = pd._testing.makeMixedDataFrame(), pd._testing.makeMixedDataFrame()
    assert hashes_equal(df1, df2)
    df2['A'] = df2['A'].values[::-1]
    assert not hashes_equal(df1, df2)


def test_series_hash():
    series1 = pd._testing.makeStringSeries()
    series2 = series1.copy()
    assert hashes_equal(series1, series2)
    series2.iloc[0] = 3.14
    assert not hashes_equal(series1, series2)


def test_ufunc_hash():
    assert hashes_equal(np.absolute, np.absolute)
    assert not hashes_equal(np.sin, np.cos)


def test_builtin_hash():
    assert hashes_equal(max, max)
    assert not hashes_equal(max, min)


def METHOD_NAME():
    # Modules must be hashable and distinguishable from each other.
    assert hashes_equal(np, np)
    assert not hashes_equal(np, io)
################
# Test caching #
################
# Per-(a, b) call counter used by the caching tests: every uncached invocation
# of function_with_args shifts its result by one, so a repeated return value
# proves the cache was hit.
OFFSET = {}


def function_with_args(a, b):
    """Return ``a + b`` plus an offset that grows by one on each real call."""
    global OFFSET
    bump = OFFSET.get((a, b), 0)
    OFFSET[(a, b)] = bump + 1
    return a + b + bump
# Basic cache behaviour: a repeated (hit) call returns the cached value, so the
# OFFSET bump in function_with_args is not observed.

def test_cache_with_args():
    global OFFSET
    OFFSET.clear()
    fn = cache(function_with_args)
    assert fn(0, 0) == 0
    assert fn(0, 0) == 0  # second call served from cache


def test_cache_with_kwargs():
    global OFFSET
    OFFSET.clear()
    fn = cache(function_with_args)
    assert fn(a=0, b=0) == 0
    assert fn(a=0, b=0) == 0  # keyword form must hit the same cache entry


def test_cache_clear():
    global OFFSET
    OFFSET.clear()
    fn = cache(function_with_args)
    assert fn(0, 0) == 0
    fn.clear()
    assert fn(0, 0) == 1  # cache emptied, so the function really ran again


def test_per_session_cache(document):
    # ``document`` is a pytest fixture providing a Bokeh document; with
    # per_session=True each document gets an isolated cache.
    global OFFSET
    OFFSET.clear()
    fn = cache(function_with_args, per_session=True)
    with set_curdoc(document):
        assert fn(a=0, b=0) == 0
    assert fn(a=0, b=0) == 1  # outside the session: miss, function re-runs
    with set_curdoc(document):
        assert fn(a=0, b=0) == 0  # back inside: session cache still holds 0
    assert fn(a=0, b=0) == 1
# Eviction-policy and persistence behaviour of the cache decorator.

@pytest.mark.xdist_group("cache")
@diskcache_available
def test_disk_cache():
    global OFFSET
    OFFSET.clear()
    fn = cache(function_with_args, to_disk=True)
    assert fn(0, 0) == 0
    # Disk-backed cache materialises a ./cache directory with entries.
    assert pathlib.Path('./cache').exists()
    assert list(pathlib.Path('./cache').glob('*'))
    assert fn(0, 0) == 0
    fn.clear()
    assert fn(0, 0) == 1


@pytest.mark.xdist_group("cache")
@pytest.mark.parametrize('to_disk', (True, False))
def test_cache_fifo(to_disk):
    if to_disk and diskcache is None:
        pytest.skip('requires diskcache')
    global OFFSET
    OFFSET.clear()
    # FIFO: oldest *inserted* entry is evicted first, regardless of usage.
    fn = cache(function_with_args, max_items=2, policy='fifo', to_disk=to_disk)
    assert fn(0, 0) == 0
    assert fn(0, 1) == 1
    assert fn(0, 0) == 0
    assert fn(0, 2) == 2  # (0, 0) should be evicted
    assert fn(0, 0) == 1


@pytest.mark.xdist_group("cache")
@pytest.mark.parametrize('to_disk', (True, False))
def test_cache_lfu(to_disk):
    if to_disk and diskcache is None:
        pytest.skip('requires diskcache')
    global OFFSET
    OFFSET.clear()
    # LFU: the least frequently used entry is evicted first.
    fn = cache(function_with_args, max_items=2, policy='lfu', to_disk=to_disk)
    assert fn(0, 0) == 0
    assert fn(0, 0) == 0
    assert fn(0, 1) == 1
    assert fn(0, 2) == 2  # (0, 1) should be evicted
    assert fn(0, 1) == 2


@pytest.mark.xdist_group("cache")
@pytest.mark.parametrize('to_disk', (True, False))
def test_cache_lru(to_disk):
    if to_disk and diskcache is None:
        pytest.skip('requires diskcache')
    global OFFSET
    OFFSET.clear()
    # LRU: the least recently *used* entry is evicted first.
    fn = cache(function_with_args, max_items=3, policy='lru', to_disk=to_disk)
    assert fn(0, 0) == 0
    assert fn(0, 1) == 1
    assert fn(0, 2) == 2
    assert fn(0, 0) == 0
    assert fn(0, 3) == 3  # (0, 1) should be evicted
    assert fn(0, 0) == 0
    assert fn(0, 1) == 2


@pytest.mark.xdist_group("cache")
@pytest.mark.parametrize('to_disk', (True, False))
def test_cache_ttl(to_disk):
    if to_disk and diskcache is None:
        pytest.skip('requires diskcache')
    global OFFSET
    OFFSET.clear()
    # TTL: entries older than ttl seconds expire and the function re-runs.
    fn = cache(function_with_args, ttl=0.1, to_disk=to_disk)
    assert fn(0, 0) == 0
    time.sleep(0.2)
    assert fn(0, 0) == 1


@pytest.mark.xdist_group("cache")
def test_cache_on_undecorated_parameterized_method():
    # @cache must also work on bound methods of param.Parameterized classes,
    # keying per instance and per argument.
    class Model(param.Parameterized):
        data = param.Parameter(default=1)
        executions = param.Integer(default=0)

        @cache
        def expensive_calculation(self, value):
            self.executions += 1
            return 2*value

    model = Model()
    assert model.expensive_calculation(1) == 2
    assert model.expensive_calculation(1) == 2
    assert model.executions == 1  # second call was cached
    assert model.expensive_calculation(2) == 4
    assert model.executions == 2
6,358 | inactive subscription | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from pyramid.exceptions import ConfigurationError
from pyramid.httpexceptions import HTTPSeeOther
from warehouse.organizations.models import OrganizationType
from warehouse.predicates import (
ActiveOrganizationPredicate,
DomainPredicate,
HeadersPredicate,
includeme,
)
from warehouse.subscriptions.models import StripeSubscriptionStatus
from ..common.db.organizations import (
OrganizationFactory,
OrganizationStripeCustomerFactory,
OrganizationStripeSubscriptionFactory,
)
from ..common.db.subscriptions import StripeSubscriptionFactory
class TestDomainPredicate:
    """Tests for the route predicate that restricts routes to a domain."""

    @pytest.mark.parametrize(
        ("value", "expected"),
        [(None, "domain = None"), ("pypi.io", "domain = {!r}".format("pypi.io"))],
    )
    def test_text(self, value, expected):
        # text() and phash() must agree: Pyramid uses phash() for route identity.
        predicate = DomainPredicate(value, None)
        assert predicate.text() == expected
        assert predicate.phash() == expected

    def test_when_not_set(self):
        # No configured domain means the predicate matches every request.
        predicate = DomainPredicate(None, None)
        assert predicate(None, None)

    def test_valid_value(self):
        predicate = DomainPredicate("upload.pypi.io", None)
        assert predicate(None, pretend.stub(domain="upload.pypi.io"))

    def test_invalid_value(self):
        predicate = DomainPredicate("upload.pyp.io", None)
        assert not predicate(None, pretend.stub(domain="pypi.io"))
class TestHeadersPredicate:
    """Tests for the view predicate requiring headers (``Name`` or ``Name:value``)."""

    @pytest.mark.parametrize(
        ("value", "expected"),
        [
            (["Foo", "Bar"], "header Foo, header Bar"),
            (["Foo", "Bar:baz"], "header Foo, header Bar=baz"),
        ],
    )
    def test_text(self, value, expected):
        predicate = HeadersPredicate(value, None)
        assert predicate.text() == expected
        assert predicate.phash() == expected

    def test_when_empty(self):
        # An empty header list is a misconfiguration, rejected at config time.
        with pytest.raises(ConfigurationError):
            HeadersPredicate([], None)

    @pytest.mark.parametrize(
        "value",
        [["Foo", "Bar"], ["Foo", "Bar:baz"]],
    )
    def test_valid_value(self, value):
        predicate = HeadersPredicate(value, None)
        assert predicate(None, pretend.stub(headers={"Foo": "a", "Bar": "baz"}))

    @pytest.mark.parametrize(
        "value",
        [["Foo", "Baz"], ["Foo", "Bar:foo"]],
    )
    def test_invalid_value(self, value):
        # Missing header name or mismatched header value must fail the predicate.
        predicate = HeadersPredicate(value, None)
        assert not predicate(None, pretend.stub(headers={"Foo": "a", "Bar": "baz"}))
class TestActiveOrganizationPredicate:
    """Tests for the predicate gating views on an active, paid-up organization."""

    @pytest.fixture
    def organization(self):
        # A Company organization wired to a Stripe customer record.
        organization = OrganizationFactory(
            orgtype=OrganizationType.Company,
        )
        OrganizationStripeCustomerFactory(
            organization=organization,
            stripe_customer_id="mock-customer-id",
        )
        return organization

    @pytest.fixture
    def active_subscription(self, organization):
        subscription = StripeSubscriptionFactory(
            stripe_customer_id=organization.customer.customer_id,
            status=StripeSubscriptionStatus.Active,
        )
        OrganizationStripeSubscriptionFactory(
            organization=organization,
            subscription=subscription,
        )
        return subscription

    @pytest.fixture
    def METHOD_NAME(self, organization):
        # "Inactive" here means past due, i.e. payment failed.
        subscription = StripeSubscriptionFactory(
            stripe_customer_id=organization.customer.customer_id,
            status=StripeSubscriptionStatus.PastDue,
        )
        OrganizationStripeSubscriptionFactory(
            organization=organization,
            subscription=subscription,
        )
        return subscription

    @pytest.mark.parametrize(
        ("value", "expected"),
        [
            (True, "require_active_organization = True"),
            (False, "require_active_organization = False"),
        ],
    )
    def test_text(self, value, expected):
        predicate = ActiveOrganizationPredicate(value, None)
        assert predicate.text() == expected
        assert predicate.phash() == expected

    def test_disable_predicate(self, db_request, organization):
        # require_active_organization=False always passes.
        predicate = ActiveOrganizationPredicate(False, None)
        assert predicate(organization, db_request)

    def test_disable_organizations(self, db_request, organization):
        # Without the enable_organizations fixture the feature flag is off,
        # so the predicate fails even for a valid organization.
        predicate = ActiveOrganizationPredicate(True, None)
        assert not predicate(organization, db_request)

    def test_inactive_organization(
        self,
        db_request,
        organization,
        enable_organizations,
    ):
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: "/manage/organizations/"
        )
        organization.is_active = False
        predicate = ActiveOrganizationPredicate(True, None)
        # Inactive organizations are redirected to the management page.
        with pytest.raises(HTTPSeeOther):
            predicate(organization, db_request)
        assert db_request.route_path.calls == [pretend.call("manage.organizations")]

    def test_inactive_subscription(
        self,
        db_request,
        organization,
        enable_organizations,
        METHOD_NAME,
    ):
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: "/manage/organizations/"
        )
        predicate = ActiveOrganizationPredicate(True, None)
        # A past-due subscription triggers the same redirect as inactivity.
        with pytest.raises(HTTPSeeOther):
            predicate(organization, db_request)
        assert db_request.route_path.calls == [pretend.call("manage.organizations")]

    def test_active_subscription(
        self, db_request, organization, enable_organizations, active_subscription
    ):
        predicate = ActiveOrganizationPredicate(True, None)
        assert predicate(organization, db_request)
def test_includeme():
    """includeme must register the domain route predicate and both view predicates."""
    config = pretend.stub(
        add_route_predicate=pretend.call_recorder(lambda name, pred: None),
        add_view_predicate=pretend.call_recorder(lambda name, pred: None),
    )
    includeme(config)
    assert config.add_route_predicate.calls == [pretend.call("domain", DomainPredicate)]
    assert config.add_view_predicate.calls == [
        pretend.call("require_headers", HeadersPredicate),
        pretend.call("require_active_organization", ActiveOrganizationPredicate),
    ]
6,359 | query iterate collection with offset | import numpy as np
import random
from pymilvus import (
connections,
utility,
FieldSchema, CollectionSchema, DataType,
Collection,
)
HOST = "localhost"
PORT = "19530"
COLLECTION_NAME = "test_iterator"
USER_ID = "id"
MAX_LENGTH = 65535
AGE = "age"
DEPOSIT = "deposit"
PICTURE = "picture"
CONSISTENCY_LEVEL = "Eventually"
LIMIT = 5
NUM_ENTITIES = 1000
DIM = 8
CLEAR_EXIST = False
def re_create_collection():
    """Create (or reuse) the demo collection; drop it first only when CLEAR_EXIST is set."""
    if utility.has_collection(COLLECTION_NAME) and CLEAR_EXIST:
        utility.drop_collection(COLLECTION_NAME)
        print(f"dropped existed collection{COLLECTION_NAME}")
    fields = [
        # VARCHAR primary key, ids supplied by the client (auto_id=False).
        FieldSchema(name=USER_ID, dtype=DataType.VARCHAR, is_primary=True,
                    auto_id=False, max_length=MAX_LENGTH),
        FieldSchema(name=AGE, dtype=DataType.INT64),
        FieldSchema(name=DEPOSIT, dtype=DataType.DOUBLE),
        FieldSchema(name=PICTURE, dtype=DataType.FLOAT_VECTOR, dim=DIM)
    ]
    schema = CollectionSchema(fields)
    print(f"Create collection {COLLECTION_NAME}")
    collection = Collection(COLLECTION_NAME, schema, consistency_level=CONSISTENCY_LEVEL)
    return collection


def insert_data(collection):
    """Insert 5 batches of NUM_ENTITIES rows with deterministic vectors."""
    rng = np.random.default_rng(seed=19530)
    batch_count = 5
    for i in range(batch_count):
        entities = [
            # NOTE(review): random ids within a batch range can collide,
            # producing duplicate primary keys — confirm this is acceptable
            # for the demo.
            [str(random.randint(NUM_ENTITIES * i, NUM_ENTITIES * (i + 1))) for ni in range(NUM_ENTITIES)],
            [int(ni % 100) for ni in range(NUM_ENTITIES)],
            [float(ni) for ni in range(NUM_ENTITIES)],
            rng.random((NUM_ENTITIES, DIM)),
        ]
        collection.insert(entities)
        collection.flush()
        print(f"Finish insert batch{i}, number of entities in Milvus: {collection.num_entities}")


def prepare_index(collection):
    """Build an IVF_FLAT/L2 index on the vector field and load the collection."""
    index = {
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 128},
    }
    collection.create_index(PICTURE, index)
    print("Finish Creating index IVF_FLAT")
    collection.load()
    print("Finish Loading index IVF_FLAT")


def prepare_data(collection):
    """Populate and index the collection, returning it ready for queries."""
    insert_data(collection)
    prepare_index(collection)
    return collection
def query_iterate_collection_no_offset(collection):
    """Page through all rows matching 10 <= age <= 14, 5 rows per batch."""
    expr = f"10 <= {AGE} <= 14"
    query_iterator = collection.query_iterator(expr=expr, output_fields=[USER_ID, AGE],
                                               offset=0, batch_size=5, consistency_level=CONSISTENCY_LEVEL)
    page_idx = 0
    while True:
        res = query_iterator.next()
        # An empty batch signals exhaustion; close() releases server resources.
        if len(res) == 0:
            print("query iteration finished, close")
            query_iterator.close()
            break
        for i in range(len(res)):
            print(res[i])
        page_idx += 1
        print(f"page{page_idx}-------------------------")


def METHOD_NAME(collection):
    """Same query as above but skipping the first 10 matches (offset=10)."""
    expr = f"10 <= {AGE} <= 14"
    query_iterator = collection.query_iterator(expr=expr, output_fields=[USER_ID, AGE],
                                               offset=10, batch_size=50, consistency_level=CONSISTENCY_LEVEL)
    page_idx = 0
    while True:
        res = query_iterator.next()
        if len(res) == 0:
            print("query iteration finished, close")
            query_iterator.close()
            break
        for i in range(len(res)):
            print(res[i])
        page_idx += 1
        print(f"page{page_idx}-------------------------")


def query_iterate_collection_with_limit(collection):
    """Iterate a wider age range but stop after at most 530 total rows (limit)."""
    expr = f"10 <= {AGE} <= 44"
    query_iterator = collection.query_iterator(expr=expr, output_fields=[USER_ID, AGE],
                                               batch_size=80, limit=530, consistency_level=CONSISTENCY_LEVEL)
    page_idx = 0
    while True:
        res = query_iterator.next()
        if len(res) == 0:
            print("query iteration finished, close")
            query_iterator.close()
            break
        for i in range(len(res)):
            print(res[i])
        page_idx += 1
        print(f"page{page_idx}-------------------------")
def search_iterator_collection(collection):
    """Range-search (L2 distance within radius 1.0) around one random vector, paged."""
    SEARCH_NQ = 1
    DIM = 8
    rng = np.random.default_rng(seed=19530)
    vectors_to_search = rng.random((SEARCH_NQ, DIM))
    search_params = {
        "metric_type": "L2",
        "params": {"nprobe": 10, "radius": 1.0},
    }
    search_iterator = collection.search_iterator(vectors_to_search, PICTURE, search_params, batch_size=500,
                                                 output_fields=[USER_ID])
    page_idx = 0
    while True:
        res = search_iterator.next()
        if len(res) == 0:
            print("query iteration finished, close")
            search_iterator.close()
            break
        for i in range(len(res)):
            print(res[i])
        page_idx += 1
        print(f"page{page_idx}-------------------------")


def search_iterator_collection_with_limit(collection):
    """Same range search, capped at 755 total hits via limit."""
    SEARCH_NQ = 1
    DIM = 8
    rng = np.random.default_rng(seed=19530)
    vectors_to_search = rng.random((SEARCH_NQ, DIM))
    search_params = {
        "metric_type": "L2",
        "params": {"nprobe": 10, "radius": 1.0},
    }
    search_iterator = collection.search_iterator(vectors_to_search, PICTURE, search_params, batch_size=200, limit=755,
                                                 output_fields=[USER_ID])
    page_idx = 0
    while True:
        res = search_iterator.next()
        if len(res) == 0:
            print("query iteration finished, close")
            search_iterator.close()
            break
        for i in range(len(res)):
            print(res[i])
        page_idx += 1
        print(f"page{page_idx}-------------------------")
def main():
    """Connect to Milvus, prepare the demo collection, then run each iterator demo."""
    connections.connect("default", host=HOST, port=PORT)
    collection = re_create_collection()
    collection = prepare_data(collection)
    query_iterate_collection_no_offset(collection)
    METHOD_NAME(collection)
    query_iterate_collection_with_limit(collection)
    search_iterator_collection(collection)
    search_iterator_collection_with_limit(collection)


if __name__ == '__main__':
    main()
6,360 | close | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from ._configuration import AzureArcDataManagementClientConfiguration
from ._serialization import Deserializer, Serializer
from .operations import (
ActiveDirectoryConnectorsOperations,
DataControllersOperations,
Operations,
PostgresInstancesOperations,
SqlManagedInstancesOperations,
SqlServerInstancesOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AzureArcDataManagementClient:  # pylint: disable=client-accepts-api-version-keyword
    """The AzureArcData management API provides a RESTful set of web APIs to manage Azure Data
    Services on Azure Arc Resources.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.azurearcdata.operations.Operations
    :ivar sql_managed_instances: SqlManagedInstancesOperations operations
    :vartype sql_managed_instances:
     azure.mgmt.azurearcdata.operations.SqlManagedInstancesOperations
    :ivar sql_server_instances: SqlServerInstancesOperations operations
    :vartype sql_server_instances: azure.mgmt.azurearcdata.operations.SqlServerInstancesOperations
    :ivar data_controllers: DataControllersOperations operations
    :vartype data_controllers: azure.mgmt.azurearcdata.operations.DataControllersOperations
    :ivar active_directory_connectors: ActiveDirectoryConnectorsOperations operations
    :vartype active_directory_connectors:
     azure.mgmt.azurearcdata.operations.ActiveDirectoryConnectorsOperations
    :ivar postgres_instances: PostgresInstancesOperations operations
    :vartype postgres_instances: azure.mgmt.azurearcdata.operations.PostgresInstancesOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the Azure subscription. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2022-03-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = AzureArcDataManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers over every model class the generated package exposes.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation is performed server-side; skip the client-side pass.
        self._serialize.client_side_validation = False
        # Wire up one operations group per service area, all sharing the pipeline.
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.sql_managed_instances = SqlManagedInstancesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.sql_server_instances = SqlServerInstancesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.data_controllers = DataControllersOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.active_directory_connectors = ActiveDirectoryConnectorsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.postgres_instances = PostgresInstancesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Copy so the caller's request object is never mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    def METHOD_NAME(self) -> None:
        # Close the underlying pipeline/transport.
        self._client.METHOD_NAME()

    def __enter__(self) -> "AzureArcDataManagementClient":
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details) -> None:
        self._client.__exit__(*exc_details)
6,361 | gen string table | """Benchmarks for Python's regex engine.
These are some of the original benchmarks used to tune Python's regex engine
in 2000 written by Fredrik Lundh. Retreived from
http://mail.python.org/pipermail/python-dev/2000-August/007797.html and
integrated into Unladen Swallow's pyperf.py in 2009 by David Laing.
These benchmarks are of interest since they helped to guide the original
optimization of the sre engine, and we shouldn't necessarily ignore them just
because they're "old".
"""
# Python imports
import re
# Local imports
import pyperf
from memray_helper import get_tracker
# When True, every benchmark pattern and subject string is run as bytes
# instead of str (toggled by the --force_bytes command-line flag).
USE_BYTES = False


def re_compile(s):
    """Compile *s*, encoding it to latin-1 bytes when USE_BYTES is set."""
    pattern = s.encode("latin1") if USE_BYTES else s
    return re.compile(pattern)
# These are the regular expressions to be tested. These sync up,
# index-for-index with the list of strings generated by gen_string_table()
# below.
def gen_regex_table():
    """Return the benchmark patterns.

    The list syncs up index-for-index with the strings produced by the
    string-table generator below — keep both in the same order.
    """
    return [
        re_compile("Python|Perl"),
        re_compile("Python|Perl"),
        re_compile("(Python|Perl)"),
        re_compile("(?:Python|Perl)"),
        re_compile("Python"),
        re_compile("Python"),
        re_compile(".*Python"),
        re_compile(".*Python.*"),
        re_compile(".*(Python)"),
        re_compile(".*(?:Python)"),
        re_compile("Python|Perl|Tcl"),
        re_compile("Python|Perl|Tcl"),
        re_compile("(Python|Perl|Tcl)"),
        re_compile("(?:Python|Perl|Tcl)"),
        re_compile("(Python)\\1"),
        re_compile("(Python)\\1"),
        re_compile("([0a-z][a-z0-9]*,)+"),
        re_compile("(?:[0a-z][a-z0-9]*,)+"),
        re_compile("([a-z][a-z0-9]*,)+"),
        re_compile("(?:[a-z][a-z0-9]*,)+"),
        re_compile(".*P.*y.*t.*h.*o.*n.*"),
    ]
def METHOD_NAME(n):
    """Generates the list of strings that will be used in the benchmarks.

    All strings have repeated prefixes and suffices, and n specifies the
    number of repetitions.  Each entry pairs with the pattern at the same
    index in gen_regex_table().
    """
    strings = []

    def append(s):
        # Mirror re_compile: store bytes when benchmarking bytes patterns.
        if USE_BYTES:
            strings.append(s.encode("latin1"))
        else:
            strings.append(s)

    append("-" * n + "Perl" + "-" * n)
    append("P" * n + "Perl" + "P" * n)
    append("-" * n + "Perl" + "-" * n)
    append("-" * n + "Perl" + "-" * n)
    append("-" * n + "Python" + "-" * n)
    append("P" * n + "Python" + "P" * n)
    append("-" * n + "Python" + "-" * n)
    append("-" * n + "Python" + "-" * n)
    append("-" * n + "Python" + "-" * n)
    append("-" * n + "Python" + "-" * n)
    append("-" * n + "Perl" + "-" * n)
    append("P" * n + "Perl" + "P" * n)
    append("-" * n + "Perl" + "-" * n)
    append("-" * n + "Perl" + "-" * n)
    append("-" * n + "PythonPython" + "-" * n)
    append("P" * n + "PythonPython" + "P" * n)
    append("-" * n + "a5,b7,c9," + "-" * n)
    append("-" * n + "a5,b7,c9," + "-" * n)
    append("-" * n + "a5,b7,c9," + "-" * n)
    append("-" * n + "a5,b7,c9," + "-" * n)
    append("-" * n + "Python" + "-" * n)
    return strings
def init_benchmarks(n_values=None):
    """Initialize the strings we'll run the regexes against.

    The strings used in the benchmark are prefixed and suffixed by
    strings that are repeated n times.

    The sequence n_values contains the values for n.
    If n_values is None the values of n from the original benchmark
    are used.

    Returns:
        A list of (compiled_pattern, subject_string) pairs covering every
        pattern at every value of n.
    """
    if n_values is None:
        # The n values from Fredrik Lundh's original benchmark.
        n_values = (0, 5, 50, 250, 1000, 5000, 10000)
    string_tables = {n: METHOD_NAME(n) for n in n_values}
    regexs = gen_regex_table()
    data = []
    for n in n_values:
        for id in range(len(regexs)):
            regex = regexs[id]
            string = string_tables[n][id]
            data.append((regex, string))
    return data
def bench_regex_effbot(loops):
    """Run every (pattern, string) pair `loops` times, 10 searches each.

    Returns elapsed wall time; memory is tracked via the memray helper.
    """
    if bench_regex_effbot.data is None:
        bench_regex_effbot.data = init_benchmarks()
    data = bench_regex_effbot.data

    range_it = range(loops)
    # Hoist the lookup so the timed loop pays no attribute-access cost.
    search = re.search

    with get_tracker():
        t0 = pyperf.perf_counter()
        for _ in range_it:
            # Runs all of the benchmarks for a given value of n.
            for regex, string in data:
                # search 10 times (unrolled; matches inner_loops=10 below)
                search(regex, string)
                search(regex, string)
                search(regex, string)
                search(regex, string)
                search(regex, string)
                search(regex, string)
                search(regex, string)
                search(regex, string)
                search(regex, string)
                search(regex, string)
        return pyperf.perf_counter() - t0


# cached data, generated at the first call
bench_regex_effbot.data = None
def add_cmdline_args(cmd, args):
    """Propagate --force_bytes to pyperf worker command lines when set."""
    if args.force_bytes:
        cmd += ["--force_bytes"]
if __name__ == "__main__":
    # Entry point: register metadata, the --force_bytes flag, then run the
    # benchmark.  inner_loops=10 matches the 10 unrolled searches per pair.
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
    runner.metadata["description"] = (
        "Test the performance of regexps " "using Fredik Lundh's benchmarks."
    )
    runner.argparser.add_argument(
        "-B", "--force_bytes", action="store_true", help="test bytes regexps"
    )
    options = runner.parse_args()
    if options.force_bytes:
        USE_BYTES = True
    runner.bench_time_func("regex_effbot", bench_regex_effbot, inner_loops=10)
6,362 | post | # -*- coding: utf-8 -*-
"""
Helper module which exposes abstractions to write webservers easily
"""
from abc import ABC, abstractmethod
import socket
import http.server as http
from http import HTTPStatus
from urllib.parse import parse_qs, urlparse
import json
class Response():
    """ Represents a HTTP `Response` object """

    def __init__(self, status, body=None, headers=None):
        # Fail fast on handler bugs: validate argument types up front.
        if not isinstance(status, HTTPStatus):
            raise TypeError('status has to be of type http.HTTPStatus')
        if body and not isinstance(body, (str, dict)):
            raise TypeError('body has to be of type str or dict')
        if headers and not isinstance(headers, dict):
            raise TypeError('headers has to be of type dict')
        self.status = status
        self.body = body
        self.headers = headers

    def get_body(self):
        """Return the body as a str: '' when unset, JSON-encoded for dicts."""
        if not self.body:
            return ''
        return json.dumps(self.body) if isinstance(self.body, dict) else self.body
class Request():
    """ Represents a HTTP `Request` object """

    def __init__(self, path, qs=None, body=None, json=None, headers=None):
        # NOTE: the `json` parameter shadows the stdlib module name within
        # this scope; the name is kept for interface compatibility.
        self.path, self.qs, self.body = path, qs, body
        self.json, self.headers = json, headers
class RequestHandler(ABC):
    """
    The class that users should sub-class and provide implementation. Each of
    these functions **should** return an instance of the `Response` class
    """

    @abstractmethod
    def get(self, request):
        """Handle a GET request; must return a `Response`."""

    @abstractmethod
    def METHOD_NAME(self, request):
        """Handle a POST request; must return a `Response`."""
def MkHandlers(handlers):
    """Build a BaseHTTPRequestHandler class that routes by path.

    `handlers` maps URL paths to RequestHandler subclasses; a fresh handler
    instance is created per request.  Unknown paths return 404.
    """
    class HTTPHandler(http.BaseHTTPRequestHandler):

        def not_found(self):
            # Shared 404 response for any unrouted path.
            self.send_response(HTTPStatus.NOT_FOUND)
            self.end_headers()
            self.wfile.write('<h1> Not Found </h1>'.encode('utf-8'))

        def parse_path(self):
            return urlparse(self.path)

        def append_headers(self, headers):
            for k, v in headers.items():
                self.send_header(k, v)

        def do_GET(self):
            try:
                raw_path = self.parse_path()
                path = raw_path.path
                # KeyError here (unknown path) falls through to not_found().
                handler = handlers[path]()
                qs = parse_qs(raw_path.query)
                req = Request(path, qs, None, None, self.headers)
                resp = handler.get(req)
                self.send_response(resp.status)
                if resp.headers:
                    self.append_headers(resp.headers)
                self.end_headers()
                self.wfile.write(resp.get_body().encode('utf-8'))
            except KeyError:
                self.not_found()

        def do_POST(self):
            try:
                raw_path = self.parse_path()
                path = raw_path.path
                handler = handlers[path]()
                # NOTE(review): a request without Content-Length makes
                # int(content_len) raise TypeError — confirm all expected
                # clients send the header.
                content_len = self.headers.get('Content-Length')
                qs = None
                req_body = self.rfile.read(int(content_len)).decode("utf-8")
                req_json = None
                if self.headers.get('Content-Type') == 'application/json':
                    req_json = json.loads(req_body)
                req = Request(self.path, qs, req_body, req_json, self.headers)
                resp = handler.METHOD_NAME(req)
                self.send_response(resp.status)
                if resp.headers:
                    self.append_headers(resp.headers)
                # Required for graphiql to work with the graphQL test server.
                # NOTE(review): echoes the Origin header back verbatim —
                # acceptable for a test server only.
                self.send_header('Access-Control-Allow-Origin', self.headers['Origin'])
                self.send_header('Access-Control-Allow-Credentials', 'true')
                self.send_header('Access-Control-Allow-Methods', 'GET,POST,PUT,PATCH,DELETE,OPTIONS')
                self.end_headers()
                self.wfile.write(resp.get_body().encode('utf-8'))
            except KeyError:
                self.not_found()

        def do_OPTIONS(self):
            # CORS preflight response; 204 carries headers only, no body.
            self.send_response(204)
            # Required for graphiql to work with the graphQL test server
            self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
            self.send_header('Access-Control-Max-Age', '1728000')
            self.send_header('Access-Control-Allow-Headers', 'content-type,x-apollo-tracing')
            self.send_header('Content-Type', 'text/plain charset=UTF-8')
            self.send_header('Access-Control-Allow-Credentials', 'true')
            self.send_header('Access-Control-Allow-Origin', self.headers['Origin'])
            self.send_header('Access-Control-Allow-Methods', 'GET,POST,PUT,PATCH,DELETE,OPTIONS')
            self.end_headers()

        def log_message(self, format, *args):
            # Silence the default per-request stderr logging.
            return

    return HTTPHandler
class WebServer(http.HTTPServer):
    """HTTPServer subclass that announces its bind address on startup."""

    def __init__(self, server_address, handler):
        super().__init__(server_address, handler)

    def server_bind(self):
        """Bind the listening socket, printing the address first."""
        print('Running http server on {0}:{1}'.format(self.server_address[0],
                                                      self.server_address[1]))
        # Kept for backward compatibility; HTTPServer already enables address
        # reuse via the ``allow_reuse_address`` class attribute.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bug fix: delegate to HTTPServer.server_bind() instead of calling
        # ``self.socket.bind`` directly.  The parent implementation also sets
        # ``server_name`` and ``server_port``, which request handlers and
        # anything reading the bound ephemeral port rely on.
        super().server_bind()
6,363 | create | '''
Plugin Rules
============
Methods described in this section relate to the plugin rules API.
These methods can be accessed at ``Nessus.plugin_rules``.
.. rst-class:: hide-signature
.. autoclass:: PluginRulesAPI
:members:
'''
from typing import List, Dict, Optional
from typing_extensions import Literal
from restfly.utils import dict_clean, dict_merge
from tenable.base.endpoint import APIEndpoint
from .iterators.plugins import PluginIterator
class PluginRulesAPI(APIEndpoint):
    # All requests are issued relative to this endpoint path.
    _path = 'plugin-rules'

    def METHOD_NAME(self,
                    plugin_id: int,
                    type: Literal['recast_critical',
                                  'recast_high',
                                  'recast_medium',
                                  'recast_low',
                                  'recast_info',
                                  'exclude'
                                  ],
                    host: Optional[str] = None,
                    date: Optional[int] = None
                    ) -> None:
        '''
        Creates a new plugin rule

        Args:
            plugin_id (int): The plugin id to modify
            type: (str): The type of modification to perform
            host (str, optional): The host to apply this rule to
            date (int, optional): The unix date for this rule to expire

        Example:

            >>> nessus.plugin_rules.create(
            ...     plugin_id=19506,
            ...     type='exclude',
            ...     host='192.168.0.1',
            ...     date=1645164000
            ... )
        '''
        # The API expects the plugin id as a string and an empty string (not
        # null) when no host restriction applies.
        self._post(json={
            'plugin_id': str(plugin_id),
            'type': type,
            'host': host if host else '',
            'date': date
        })

    def delete(self, rule_id: int) -> None:
        '''
        Deletes a plugin rule

        Args:
            rule_id (int): The rule to delete

        Example:

            >>> nessus.plugin_rules.delete(1)
        '''
        self._delete(f'{rule_id}')

    def delete_many(self, rule_ids: List[int]) -> None:
        '''
        Deletes multiple plugin rules

        Args:
            rule_ids (list[int]): The rules to delete

        Example:

            >>> nessus.plugin_rules.delete_many([1, 2, 3])
        '''
        self._delete(json={'ids': rule_ids})

    def edit(self,
             rule_id: int,
             plugin_id: Optional[int] = None,
             type: Optional[Literal['recast_critical',
                                    'recast_high',
                                    'recast_medium',
                                    'recast_low',
                                    'recast_info',
                                    'exclude'
                                    ]] = None,
             host: Optional[str] = None,
             date: Optional[int] = None
             ) -> None:
        '''
        Edits an existing plugin rule.  Unspecified fields retain the values
        currently stored on the rule.

        Args:
            rule_id (int): The rule to modify
            plugin_id (int, optional): The plugin id to modify
            type: (str, optional): The type of modification to perform
            host (str, optional): The host to apply this rule to
            date (int, optional): The unix date for this rule to expire

        Example:

            >>> nessus.plugin_rules.edit(1, date=1645164000)
        '''
        # Bug fix: fetch the rule being edited, not rule 1.
        rule = self.details(rule_id)
        # Bug fix: only stringify plugin_id when given; str(None) would
        # produce the literal 'None', which dict_clean cannot strip.
        payload = dict_merge(rule, dict_clean({
            'plugin_id': str(plugin_id) if plugin_id is not None else None,
            'type': type,
            'host': host,
            'date': date
        }))
        return self._put(f'{rule_id}', json=payload)

    def list(self) -> List[Dict]:
        '''
        Lists the plugin rules

        Return:
            List[Dict]:
                List of plugin rule objects

        Example:

            >>> for rule in nessus.plugin_rules.list():
            ...     print(rule)
        '''
        return self._get()['plugin_rules']

    def details(self, rule_id: int) -> Dict:
        '''
        Returns the details of a given plugin rule

        Args:
            rule_id (int): The plugin rule id

        Returns:
            Dict:
                The plugin rule object requested

        Example:

            >>> nessus.plugin_rules.details(1)
        '''
        return self._get(f'{rule_id}')
6,364 | test publication upgrade 7 8 | import pytest
def test_publication_upgrade(upgrader, publication_1):
    """Upgrading a v1 publication to v2 drops ``references`` and keeps
    identifiers/lab/award intact.

    ``upgrader`` and ``publication_1`` are pytest fixtures supplied by
    the project's test harness.
    """
    value = upgrader.upgrade('publication', publication_1, target_version='2')
    assert value['schema_version'] == '2'
    assert 'references' not in value
    assert value['identifiers'] == ['PMID:25409824']
    assert value['lab'] == "cb0ef1f6-3bd3-4000-8636-1c5b9f7000dc"
    assert value['award'] == "b5736134-3326-448b-a91a-894aafb77876"
def test_publication_upgrade_4_5(upgrader, publication_4):
    """Statuses are remapped when upgrading a publication from v4 to v5."""
    # (old status, expected status after the upgrade)
    status_map = (
        ('planned', 'in preparation'),
        ('replaced', 'deleted'),
        ('in press', 'submitted'),
        ('in revision', 'submitted'),
    )
    for old_status, expected_status in status_map:
        publication_4['status'] = old_status
        upgraded = upgrader.upgrade('publication', publication_4,
                                    current_version='4', target_version='5')
        assert upgraded['status'] == expected_status
def test_publication_upgrade_5_6(upgrader, publication_5):
    """Statuses are remapped when upgrading a publication from v5 to v6."""
    def run_upgrade():
        return upgrader.upgrade('publication', publication_5,
                                current_version='5', target_version='6')

    # First pass uses the fixture's initial status.
    upgraded = run_upgrade()
    assert upgraded['schema_version'] == '6'
    assert upgraded['status'] == 'in progress'

    # (old status, expected status after the upgrade)
    for old_status, expected_status in (('published', 'released'),
                                        ('submitted', 'in progress'),
                                        ('deleted', 'deleted')):
        publication_5['status'] = old_status
        upgraded = run_upgrade()
        assert upgraded['schema_version'] == '6'
        assert upgraded['status'] == expected_status
def test_publication_upgrade_6_7(
    root,
    testapp,
    upgrader,
    registry,
    publication_6,
    annotation_dataset,
    base_experiment,
    base_experiment_series,
    base_reference,
    functional_characterization_experiment,
    publication_data,
):
    """Upgrading v6 -> v7 prunes ``datasets``: experiment series and
    publication-data objects are dropped, and the publication becomes
    discoverable from the publication-data side instead.
    """
    publication_6['schema_version'] = '6'
    # Six datasets of different types go in; the upgrade is expected to
    # keep only four of them (see assertions below).
    publication_6['datasets'] = [
        annotation_dataset['uuid'],
        base_experiment['uuid'],
        base_experiment_series['uuid'],
        base_reference['uuid'],
        functional_characterization_experiment['uuid'],
        publication_data['uuid']
    ]
    context = root.get_by_uuid(publication_6['uuid'])
    value = upgrader.upgrade(
        'publication',
        publication_6,
        registry=registry,
        current_version='6',
        target_version='7',
        context=context,
    )
    assert value['schema_version'] == '7'
    # Experiment series and publication data were removed -> 4 remain.
    assert len(value['datasets']) == 4
    assert annotation_dataset['uuid'] in value['datasets']
    assert base_experiment['uuid'] in value['datasets']
    assert base_experiment_series['uuid'] not in value['datasets']
    assert base_reference['uuid'] in value['datasets']
    assert functional_characterization_experiment['uuid'] in value['datasets']
    # The link now lives on the publication-data object as a reference
    # back to this publication...
    new_publication_data = testapp.get(
        publication_data['@id'] + '@@index-data'
    ).json['object']
    assert publication_6['@id'] in new_publication_data['references']
    # ...and the publication exposes it through ``publication_data``.
    new_publication = testapp.get(
        publication_6['@id'] + '@@index-data'
    ).json['object']
    assert publication_data['@id'] in new_publication['publication_data']
def METHOD_NAME(upgrader, publication_7):
    """Upgrading v7 -> v8 removes a malformed ``date_published`` and
    records the original value in ``notes``.
    """
    value = upgrader.upgrade('publication', publication_7, current_version='7', target_version='8')
    assert value['schema_version'] == '8'
    assert 'Incorrect date_published formatting: 3/30/20' in value['notes']
    assert 'date_published' not in value
def test_publication_upgrade_8_9(upgrader, publication_8):
    """Upgrading v8 -> v9 removes ``datasets`` and preserves the dataset
    paths in ``notes`` for traceability.
    """
    value = upgrader.upgrade('publication', publication_8, current_version='8', target_version='9')
    assert value['schema_version'] == '9'
    assert 'datasets' not in value
    assert 'Publication datasets:' in value['notes']
    assert '/experiments/' in value['notes']
    assert '/annotations/' in value['notes']
6,365 | replace | # A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2020-2022 NV Access Limited, Cyrille Bougot
"""Unit tests for the characterProcessing module.
"""
import unittest
import re
from characterProcessing import SpeechSymbolProcessor
from characterProcessing import SymbolLevel
from characterProcessing import processSpeechSymbols as process
from characterProcessing import processSpeechSymbol
class TestComplex(unittest.TestCase):
	"""Test the complex symbols rules.
	"""

	def _replace_cb(self, replacement, name=None):
		"""Return a regexp callback which replaces matches of the given
		group name (or all groups if no name is given) with the
		replacement string, with support for replacement of group
		references.
		"""
		def replace(m):
			if name is None or m.lastgroup == name:
				# NOTE(review): ``self`` here is a TestCase, not a
				# SpeechSymbolProcessor; _replaceGroups is invoked
				# unbound and presumably does not rely on processor
				# state — confirm against the NVDA implementation.
				return SpeechSymbolProcessor._replaceGroups(self, m, replacement)
			return m.group()
		return replace

	def METHOD_NAME(self, string, pattern, replacement, name=None):
		"""Perform a pattern-based replacement on a string, for the
		given named group (or all groups if no name is given), with
		support for replacement of group references.
		"""
		regexp = re.compile(pattern, re.UNICODE)
		return regexp.sub(self._replace_cb(replacement, name), string)

	def test_group_replacement(self):
		"""Test that plain text gets properly replaced
		"""
		replaced = self.METHOD_NAME(
			string="1",
			pattern=r"(\d)",
			replacement="a"
		)
		self.assertEqual(replaced, "a")

	def test_backslash_replacement(self):
		"""Test that backslashes get properly replaced
		"""
		replaced = self.METHOD_NAME(
			string="1",
			pattern=r"(\d)",
			replacement=r"\\"
		)
		self.assertEqual(replaced, "\\")

	def test_double_backslash_replacement(self):
		"""Test that double backslashes get properly replaced
		"""
		replaced = self.METHOD_NAME(
			string="1",
			pattern=r"(\d)",
			replacement=r"\\\\"
		)
		self.assertEqual(replaced, r"\\")

	def test_unknown_escape(self):
		"""Test that a non-supported escaped character (i.e. not \\1,
		\\2, ... \\9 and \\\\) in the replacement raises an error
		"""
		with self.assertRaises(LookupError):
			self.METHOD_NAME(
				string="1",
				pattern=r"(\d)",
				replacement=r"\a"
			)

	def test_missing_group(self):
		"""Test that a reference in the replacement to an non-existing
		group raises an error
		"""
		with self.assertRaises(IndexError):
			self.METHOD_NAME(
				string="1",
				pattern=r"(\d)",
				replacement=r"\2"
			)

	def test_unterminated_escape(self):
		"""Test that an escape at the end of replacement raises an
		error, since there is nothing to be escaped there
		"""
		with self.assertRaises(LookupError):
			self.METHOD_NAME(
				string="1",
				pattern=r"(\d)",
				replacement="\\"
			)

	def test_group_replacements(self):
		"""Test that group references get properly replaced
		"""
		replaced = self.METHOD_NAME(
			string="bar.BAT",
			pattern=r"(([a-z]*)\.([A-Z]*))",
			replacement=r"\2>\1"
		)
		self.assertEqual(replaced, "BAT>bar")

	def test_multiple_group_replacement(self):
		"""Test that group indexing is correct with multiple groups
		"""
		replaced = self.METHOD_NAME(
			string="bar.BAT",
			pattern=r"(baz)|(?P<foo>([a-z]*)\.([A-Z]*))",
			replacement=r"\2>\1",
			name="foo"
		)
		self.assertEqual(replaced, "BAT>bar")

	def test_engine(self):
		"""Test inclusion of group replacement in engine
		"""
		# End-to-end check through the real fr_FR symbol rules.
		replaced = process("fr_FR", "Le 03.04.05.", SymbolLevel.ALL)
		self.assertEqual(replaced, "Le 03 point 04 point 05 point.")
		replaced = process("fr_FR", "Le 03/04/05.", SymbolLevel.ALL)
		self.assertEqual(replaced, "Le 03 barre oblique 04 barre oblique 05 point.")
# A character in CLDR file but not in symbol file
CHAR_IN_CLDR_FILE = '☺'
CHAR_IN_CLDR_FILE_DESC = 'smiling face'
# A character in symbol file but not in CLDR file
CHAR_IN_SYMB_FILE = ' '
CHAR_IN_SYMB_FILE_DESC = 'space'
# A character in both CLDR and symbol file
CHAR_IN_BOTH_FILES = '.'
CHAR_IN_BOTH_FILES_DESC = 'dot'
class TestUsingCLDR(unittest.TestCase):
	"""Verify how CLDR data and NVDA symbol files interact when
	resolving a character description.
	"""

	def test_processSpeechSymbol_withoutSymbolFile(self):
		"""Test processSpeechSymbol with languages that have CLDR file but no symbol file.
		"""
		languagesWithoutSymbolFile = [
			# The real list is:
			# 'af_ZA', 'am', 'as', 'gu', 'id', 'kok', 'ml', 'mni', 'ne', 'te', 'th', 'ur'
			# But 'mni' has only a few symbols in CLDR (and not the smiling face)
			'af_ZA', 'am', 'as', 'gu', 'id', 'kok', 'ml', 'ne', 'te', 'th', 'ur'
		]
		for locale in languagesWithoutSymbolFile:
			# CLDR descriptions are locale specific, so the English
			# description must NOT be returned here.
			self.assertNotEqual(
				processSpeechSymbol(locale, CHAR_IN_CLDR_FILE),
				CHAR_IN_CLDR_FILE_DESC,
				msg=f'Test failure for locale={locale} with "{CHAR_IN_CLDR_FILE_DESC}"',
			)
			self.assertEqual(
				processSpeechSymbol(locale, CHAR_IN_SYMB_FILE),
				CHAR_IN_SYMB_FILE_DESC,
				msg=f'Test failure for locale={locale} with "{CHAR_IN_SYMB_FILE_DESC}"',
			)

	def test_processSpeechSymbol_withSymbolFile(self):
		"""Test processSpeechSymbol with languages that have both CLDR and symbol file.
		"""
		languagesWithSymbolFileAndCLDR = ['fr_FR']
		for locale in languagesWithSymbolFileAndCLDR:
			self.assertNotEqual(
				processSpeechSymbol(locale, CHAR_IN_CLDR_FILE),
				CHAR_IN_CLDR_FILE_DESC,
				msg=f'Test failure for locale={locale} with "{CHAR_IN_CLDR_FILE_DESC}"',
			)
			self.assertNotEqual(
				processSpeechSymbol(locale, CHAR_IN_SYMB_FILE),
				CHAR_IN_SYMB_FILE_DESC,
				msg=f'Test failure for locale={locale} with "{CHAR_IN_SYMB_FILE_DESC}"',
			)
6,366 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetTaskResult',
'AwaitableGetTaskResult',
'get_task',
'get_task_output',
]
@pulumi.output_type
class GetTaskResult:
    """
    A task resource
    """
    def __init__(__self__, etag=None, METHOD_NAME=None, name=None, properties=None, system_data=None, type=None):
        # Generated constructor: each argument is type-checked and then
        # stored through pulumi.set so the @pulumi.getter properties can
        # retrieve it.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        HTTP strong entity tag value. This is ignored if submitted.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Custom task properties
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetTaskResult(GetTaskResult):
    """Awaitable wrapper so the result can be used in ``await``
    expressions; the value is already resolved, so no suspension occurs.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this a generator function,
        # which is what makes the object awaitable.
        if False:
            yield self
        return GetTaskResult(
            etag=self.etag,
            METHOD_NAME=self.METHOD_NAME,
            name=self.name,
            properties=self.properties,
            system_data=self.system_data,
            type=self.type)
def get_task(expand: Optional[str] = None,
             group_name: Optional[str] = None,
             project_name: Optional[str] = None,
             service_name: Optional[str] = None,
             task_name: Optional[str] = None,
             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTaskResult:
    """
    The tasks resource is a nested, proxy-only resource representing work performed by a DMS instance. The GET method retrieves information about a task.
    Azure REST API version: 2021-06-30.


    :param str expand: Expand the response
    :param str group_name: Name of the resource group
    :param str project_name: Name of the project
    :param str service_name: Name of the service
    :param str task_name: Name of the Task
    :return: The requested task resource.
    """
    __args__ = dict()
    __args__['expand'] = expand
    __args__['groupName'] = group_name
    __args__['projectName'] = project_name
    __args__['serviceName'] = service_name
    __args__['taskName'] = task_name
    # Merge the caller's invoke options with the provider defaults and
    # perform the synchronous invoke against the Azure Native provider.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:datamigration:getTask', __args__, opts=opts, typ=GetTaskResult).value

    return AwaitableGetTaskResult(
        etag=pulumi.get(__ret__, 'etag'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_task)
def get_task_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
                    group_name: Optional[pulumi.Input[str]] = None,
                    project_name: Optional[pulumi.Input[str]] = None,
                    service_name: Optional[pulumi.Input[str]] = None,
                    task_name: Optional[pulumi.Input[str]] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTaskResult]:
    """
    The tasks resource is a nested, proxy-only resource representing work performed by a DMS instance. The GET method retrieves information about a task.
    Azure REST API version: 2021-06-30.


    :param str expand: Expand the response
    :param str group_name: Name of the resource group
    :param str project_name: Name of the project
    :param str service_name: Name of the service
    :param str task_name: Name of the Task
    :return: Output-lifted variant of :func:`get_task`.
    """
    # Body intentionally elided: the lift_output_func decorator forwards
    # the call to get_task with resolved inputs.
    ...
6,367 | location name from url | #!/usr/bin/env python3
############################################################################
#
# MODULE: g.download.location
# AUTHOR(S): Vaclav Petras <wenzeslaus gmail com>
# PURPOSE: Download and extract location from web
# COPYRIGHT: (C) 2017 by the GRASS Development Team
#
# This program is free software under the GNU General
# Public License (>=v2). Read the file COPYING that
# comes with GRASS for details.
#
#############################################################################
"""Download GRASS Locations"""
# %module
# % label: Download GRASS Location from the web
# % description: Get GRASS Location from an URL or file path
# % keyword: general
# % keyword: data
# % keyword: download
# % keyword: import
# %end
# %option
# % key: url
# % multiple: no
# % type: string
# % label: URL of the archive with a location to be downloaded
# % description: URL of ZIP, TAR.GZ, or other similar archive
# % required: yes
# %end
# %option G_OPT_M_LOCATION
# % key: name
# % required: no
# % multiple: no
# % key_desc: name
# %end
# %option G_OPT_M_DBASE
# % key: path
# % required: no
# % multiple: no
# %end
import atexit
import os
import shutil
from pathlib import Path
import grass.script as gs
from grass.grassdb.checks import is_location_valid
from grass.script.utils import try_rmdir
from grass.utils.download import DownloadError, download_and_extract, name_from_url
def find_location_in_directory(path, recurse=0):
    """Return path to location in one of the subdirectories or None

    The first location found is returned. The expected usage is looking for one
    location somewhere nested in subdirectories.

    By default only the immediate subdirectories of the provided directory are
    tested, but with ``recurse >= 1`` additional levels of subdirectories
    are tested for being locations.

    Directory names are sorted to provide a stable result.

    :param path: Path to the directory to search
    :param recurse: How many additional levels of subdirectories to explore
    :return: path of the first valid location found, or None
    :raises ValueError: if *recurse* is negative
    """
    # Explicit check instead of assert: asserts are stripped when Python
    # runs with optimizations (-O), which would silently allow an
    # infinitely recursing negative value.
    if recurse < 0:
        raise ValueError("recurse must be non-negative")
    full_paths = (os.path.join(path, i) for i in os.listdir(path))
    candidates = sorted(i for i in full_paths if os.path.isdir(i))
    # Breadth-first: check all immediate candidates before descending.
    for candidate in candidates:
        if is_location_valid(candidate):
            return candidate
    if recurse:
        for candidate in candidates:
            result = find_location_in_directory(candidate, recurse - 1)
            if result:
                return result
    return None
def METHOD_NAME(url):
    """Create location name from URL

    Derives a name from the file-name part of *url* and sanitizes it
    into a legal GRASS name.

    :param url: URL the location archive is downloaded from
    :return: a legal location name derived from the URL
    """
    return gs.legalize_vector_name(name_from_url(url))
def main(options, unused_flags):
    """Download and copy location to destination

    :param options: parsed module options; uses ``url``, ``name`` and
        ``path`` (``name`` and ``path`` fall back to URL-derived name
        and the current GRASS database respectively)
    :param unused_flags: parsed module flags (not used)
    """
    url = options["url"]
    name = options["name"]
    database = options["path"]
    if not database:
        # Use the current database path.
        database = gs.gisenv()["GISDBASE"]
    if not name:
        name = METHOD_NAME(url)
    destination = Path(database) / name
    # Refuse to overwrite an existing location of the same name.
    if destination.exists():
        gs.fatal(
            _("Location named <{}> already exists, download canceled").format(name)
        )
    gs.message(_("Downloading and extracting..."))
    try:
        directory = download_and_extract(url)
        if not directory.is_dir():
            gs.fatal(_("Archive contains only one file and no mapset directories"))
        # Clean up the temporary extraction directory on exit.
        atexit.register(lambda: try_rmdir(directory))
    except DownloadError as error:
        gs.fatal(_("Unable to get the location: {error}").format(error=error))
    if not is_location_valid(directory):
        gs.verbose(_("Searching for valid location..."))
        # This in fact deal with location being on the third level of directories
        # thanks to how the extraction functions work (leaving out one level).
        result = find_location_in_directory(directory, recurse=1)
        if result:
            # We just want to show relative path in the message.
            # The relative path misses the root directory (name), because we
            # loose it on the way. (We should use parent directory to get the
            # full relative path, but the directory name is different now.
            # This is the consequence of how the extract functions work.)
            relative = os.path.relpath(result, start=directory)
            gs.verbose(
                _("Location found in a nested directory '{directory}'").format(
                    directory=relative
                )
            )
            directory = result
        else:
            # The list is similarly misleading as the relative path above
            # as it misses the root directory, but it still should be useful.
            files_and_dirs = os.listdir(directory)
            gs.fatal(
                _(
                    "The downloaded file is not a valid GRASS Location."
                    " The extracted file contains these files and directories:"
                    "\n{files_and_dirs}"
                ).format(files_and_dirs=" ".join(files_and_dirs))
            )
    gs.verbose(_("Copying to final destination..."))
    shutil.copytree(src=directory, dst=destination)
    gs.message(_("Path to the location now <{path}>").format(path=destination))
if __name__ == "__main__":
main(*gs.parser()) |
6,368 | refresh tags | import os
import tap_tester.connections as connections
import tap_tester.menagerie as menagerie
import tap_tester.runner as runner
from functools import reduce
# TODO fix setup.py? so zenpy module is availalble on dev_vm without manually running pip install
from zenpy import Zenpy
from zenpy.lib.api_objects import Group, Organization, Tag, User
from base import ZendeskTest
class ZendeskAllStreams(ZendeskTest):
    """End-to-end tap-tester run exercising all supported Zendesk streams."""

    def name(self):
        """Name reported to the tap-tester harness."""
        return "tap_tester_zendesk_all_streams"

    def expected_sync_streams(self):
        """Streams selected for sync in this test."""
        return {
            "tickets",
            "groups",
            "users",
            "organizations",
            "ticket_audits",
            "ticket_fields",
            "group_memberships",
            "macros",
            "tags",
            "ticket_metrics",
        }

    def expected_pks(self):
        """Primary key field(s) per stream."""
        return {
            "tickets": {"id"},
            "groups": {"id"},
            "users": {"id"},
            "organizations": {"id"},
            "ticket_audits": {"id"},
            "ticket_fields": {"id"},
            "group_memberships": {"id"},
            "macros": {"id"},
            "tags": {"name"},
            "ticket_metrics": {"id"},
        }

    def METHOD_NAME(self, records):
        """Re-apply tags to recent unclosed tickets via the Zendesk API.

        Tags unused for over 60 days age out on the Zendesk side; removing
        and re-adding them refreshes their age so the next sync sees them.
        """
        # Zenpy client credentials to connect to API
        creds = {
            'email': 'dev@stitchdata.com',
            'password': os.getenv('TAP_ZENDESK_API_PASSWORD'),
            'subdomain': "rjmdev",
        }
        test_tags = ['test_tag_1', 'test_tag_2', 'test_tag_3']
        # filter out closed tickets since we cannot update them to refresh their tags
        unclosed_tickets = [t for t in records.get('tickets').get('messages') if t.get('data').get('status') != 'closed']
        self.assertGreaterEqual(len(unclosed_tickets), 3)
        last_3_unclosed_tickets = unclosed_tickets[-3:]
        zenpy_client = Zenpy(**creds)
        for i, tic in enumerate(last_3_unclosed_tickets):
            # remove and re-add existing tags to refresh
            if tic.get('data').get('tags'):
                tag_list = tic.get('data').get('tags')
                zenpy_client.tickets.delete_tags(tic.get('data').get('id'), tag_list)
                # replace old tags. adding the same tag does not create duplicates
                zenpy_client.tickets.add_tags(tic.get('data').get('id'), tag_list)
            # add / refresh test tags
            zenpy_client.tickets.add_tags(tic.get('data').get('id'), test_tags[0:(i + 1)])
        # mark tags as refreshed as soon as we successfully get through one loop
        self.tags_are_stale = False

    def test_run(self):
        """Run discovery plus a full sync and validate the replicated data."""
        # Default test setup
        # Create the connection for Zendesk
        conn_id = connections.ensure_connection(self)
        # Run a check job using orchestrator
        check_job_name = runner.run_check_mode(self, conn_id)
        exit_status = menagerie.get_exit_status(conn_id, check_job_name)
        menagerie.verify_check_exit_status(self, exit_status, check_job_name)
        # Verify schemas discovered were discovered
        self.found_catalogs = menagerie.get_catalogs(conn_id)
        self.assertEqual(len(self.found_catalogs), len(self.expected_check_streams()))
        # Verify the schemas discovered were exactly what we expect
        found_catalog_names = {catalog['tap_stream_id']
                               for catalog in self.found_catalogs
                               if catalog['tap_stream_id'] in self.expected_check_streams()}
        self.assertSetEqual(self.expected_check_streams(), found_catalog_names)
        # Select our catalogs
        our_catalogs = [c for c in self.found_catalogs if c.get('tap_stream_id') in self.expected_sync_streams()]
        for c in our_catalogs:
            c_annotated = menagerie.get_annotated_schema(conn_id, c['stream_id'])
            c_metadata = self.to_map(c_annotated['metadata'])
            connections.select_catalog_and_fields_via_metadata(conn_id, c, c_annotated, [], [])
        # Clear state before our run
        menagerie.set_state(conn_id, {})
        # Run a sync job using orchestrator
        # Verify exit status is 0 and verify rows were synced
        _ = self.run_and_verify_sync(conn_id, state={})
        # Verify actual rows were synced
        # Ensure all records have a value for PK(s)
        records = runner.get_records_from_target_output()
        # assume tags are stale since we cannot query tag age / date from synced records or the API
        self.tags_are_stale = True
        # If all tags have aged out then refresh them and run another sync, tags not used in over
        # 60 days will automatically age out. Removing and re-adding the tag will refresh it
        if not records.get('tags').get('messages', []):
            self.METHOD_NAME(records)
            # Run a second sync job to pick up new tags, should be faster since we haven't touched state
            # Verify exit status is 0 and verify rows were synced
            _ = self.run_and_verify_sync(conn_id)
            # Ensure we replicated some tags records this time
            tags_records = runner.get_records_from_target_output()
            # BUG FIX: was ``len(tags_records, 0)`` — the 0 was passed to
            # len() instead of being assertGreater's second argument.
            self.assertGreater(len(tags_records), 0)
        for stream in self.expected_sync_streams():
            messages = records.get(stream, {}).get('messages', [])
            if stream == 'tags':
                # check to see if tags were already refreshed or not
                if self.tags_are_stale:
                    # refresh has not been run yet, this means we already have some tags records
                    self.METHOD_NAME(records)
                else:
                    # tags were already refreshed so records were missing from first sync
                    messages = tags_records.get(stream).get('messages')
            if stream in ['tickets', 'groups', 'users']:
                self.assertGreater(len(messages), 100, msg="Stream {} has fewer than 100 records synced".format(stream))
            for m in messages:
                pk_set = self.expected_pks()[stream]
                for pk in pk_set:
                    self.assertIsNotNone(m.get('data', {}).get(pk), msg="Missing primary-key for message {}".format(m))
6,369 | test adds env when enabled | #!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from unittest import mock
import pytest
from k8s.models.deployment import Deployment, DeploymentSpec
from k8s.models.pod import PodTemplateSpec, PodSpec, Container, EnvVar
from fiaas_deploy_daemon import Configuration
from fiaas_deploy_daemon.deployer.kubernetes.deployment import DataDog
CONTAINER_IMAGE = "datadog_container_image:tag"
CONTAINER_IMAGE_LATEST = "datadog_container_image:latest"
class TestDataDog(object):
    """Unit tests for the DataDog sidecar injection into Deployments."""

    @pytest.fixture(scope="module")
    def config(self):
        """Mocked daemon Configuration with DataDog settings."""
        config = mock.create_autospec(Configuration([]), spec_set=True)
        config.datadog_container_image = CONTAINER_IMAGE
        config.datadog_container_memory = "2Gi"
        config.datadog_global_tags = {"tag": "test"}
        return config

    @pytest.fixture(scope="module")
    def datadog(self, config):
        """DataDog deployer under test."""
        return DataDog(config)

    @pytest.fixture
    def deployment(self):
        """Minimal Deployment with a single main container."""
        main_container = Container(env=[EnvVar(name="DUMMY", value="CANARY")])
        pod_spec = PodSpec(containers=[main_container])
        pod_template_spec = PodTemplateSpec(spec=pod_spec)
        deployment_spec = DeploymentSpec(template=pod_template_spec)
        return Deployment(spec=deployment_spec)

    @pytest.fixture(params=(True, False))
    def best_effort_required(self, request):
        yield request.param

    def test_noop_when_not_enabled(self, datadog, app_spec, deployment):
        """apply() must not modify the deployment when datadog is disabled."""
        expected = deepcopy(deployment)
        datadog.apply(deployment, app_spec, False, 0)
        assert expected == deployment

    @pytest.mark.parametrize("best_effort_required", (False, True))
    def METHOD_NAME(self, datadog, app_spec, deployment, best_effort_required):
        """STATSD_* env vars are injected into the main container."""
        datadog_spec = app_spec.datadog._replace(enabled=True, tags={})
        app_spec = app_spec._replace(datadog=datadog_spec)
        datadog.apply(deployment, app_spec, best_effort_required, 0)
        expected = [
            {"name": "DUMMY", "value": "CANARY"},
            {"name": "STATSD_HOST", "value": "localhost"},
            {"name": "STATSD_PORT", "value": "8125"},
        ]
        assert expected == deployment.as_dict()["spec"]["template"]["spec"]["containers"][0]["env"]

    def test_adds_global_tags_when_enabled(self, datadog, app_spec, deployment, best_effort_required):
        """Globally configured tags end up in the sidecar's DD_TAGS."""
        datadog_spec = app_spec.datadog._replace(enabled=True, tags={})
        app_spec = app_spec._replace(datadog=datadog_spec)
        datadog.apply(deployment, app_spec, best_effort_required, 0)
        expected = {
            "name": "DD_TAGS",
            "value": "app:{},k8s_namespace:{},tag:test".format(app_spec.name, app_spec.namespace),
        }
        assert expected in deployment.as_dict()["spec"]["template"]["spec"]["containers"][1]["env"]

    @pytest.mark.parametrize("name, namespace", (("bilbo", "baggins"), ("rincewind", "discworld")))
    def test_adds_container_when_enabled(self, datadog, app_spec, deployment, best_effort_required, name, namespace):
        """The full datadog sidecar container spec is appended."""
        datadog_spec = app_spec.datadog._replace(enabled=True, tags={"a": "1", "b": "2"})
        # BUG FIX: this _replace was previously duplicated on two
        # consecutive lines; the second call was a no-op.
        app_spec = app_spec._replace(datadog=datadog_spec)
        app_spec = app_spec._replace(name=name, namespace=namespace)
        datadog.apply(deployment, app_spec, best_effort_required, 0)
        expected = {
            "name": DataDog.DATADOG_CONTAINER_NAME,
            "image": CONTAINER_IMAGE,
            "volumeMounts": [],
            "command": [],
            "args": [],
            "env": [
                {
                    "name": "DD_TAGS",
                    "value": "a:1,app:{},b:2,k8s_namespace:{},tag:test".format(app_spec.name, app_spec.namespace),
                },
                {"name": "DD_API_KEY", "valueFrom": {"secretKeyRef": {"name": "datadog", "key": "apikey"}}},
                {"name": "NON_LOCAL_TRAFFIC", "value": "false"},
                {"name": "DD_LOGS_STDOUT", "value": "yes"},
                {"name": "DD_EXPVAR_PORT", "value": "42622"},
                {"name": "DD_CMD_PORT", "value": "42623"},
            ],
            "envFrom": [],
            "imagePullPolicy": "IfNotPresent",
            "ports": [],
        }
        if not best_effort_required:
            expected["resources"] = {
                "limits": {"cpu": "400m", "memory": "2Gi"},
                "requests": {"cpu": "200m", "memory": "2Gi"},
            }
        assert expected == deployment.as_dict()["spec"]["template"]["spec"]["containers"][-1]

    def test_adds_correct_image_pull_policy_for_latest(self, config, app_spec, deployment):
        """A :latest image implies imagePullPolicy=Always."""
        config.datadog_container_image = CONTAINER_IMAGE_LATEST
        datadog = DataDog(config)
        datadog_spec = app_spec.datadog._replace(enabled=True)
        app_spec = app_spec._replace(datadog=datadog_spec)
        datadog.apply(deployment, app_spec, False, 0)
        actual = deployment.as_dict()["spec"]["template"]["spec"]["containers"][-1]
        assert actual["image"] == CONTAINER_IMAGE_LATEST
        assert actual["imagePullPolicy"] == "Always"

    def test_adds_lifecycle_when_pre_stop_delay_is_set_and_sleep_is_active(self, config, app_spec, deployment):
        """A preStop sleep hook is added when the sleep feature is on."""
        config.datadog_container_image = CONTAINER_IMAGE_LATEST
        config.datadog_activate_sleep = True
        datadog = DataDog(config)
        datadog_spec = app_spec.datadog._replace(enabled=True)
        app_spec = app_spec._replace(datadog=datadog_spec)
        datadog.apply(deployment, app_spec, False, 5)
        expected = {"preStop": {"exec": {"command": ["sleep", "5"]}}}
        assert expected == deployment.as_dict()["spec"]["template"]["spec"]["containers"][-1]["lifecycle"]

    def test_does_not_add_lifecycle_when_pre_stop_delay_is_set_and_sleep_is_not_active(
        self, config, app_spec, deployment
    ):
        """No preStop hook is added when the sleep feature is off."""
        config.datadog_container_image = CONTAINER_IMAGE_LATEST
        config.datadog_activate_sleep = False
        datadog = DataDog(config)
        datadog_spec = app_spec.datadog._replace(enabled=True)
        app_spec = app_spec._replace(datadog=datadog_spec)
        datadog.apply(deployment, app_spec, False, 5)
        # Idiomatic membership assertion (was ``assert False == (... in ...)``).
        assert "lifecycle" not in deployment.as_dict()["spec"]["template"]["spec"]["containers"][-1]
6,370 | set last used time | from __future__ import annotations
import dataclasses
import logging
import math
from collections import deque
from datetime import datetime
from typing import Optional, List, Dict, Deque, Set
from .config import Config
from .operator_resource_info import OpResIdent
from ..statistic.data import NeonOpResStatData
from ..statistic.proxy_client import ProxyStatClient
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class OpResUsedTime:
    """Usage bookkeeping for one operator resource.

    Identity (equality and hashing) is derived solely from ``ident``;
    the usage counters are mutable bookkeeping that is updated in place
    through ``object.__setattr__`` despite the frozen dataclass.
    """
    ident: OpResIdent        # the operator resource this record tracks
    last_used_time: int = 0  # unix timestamp of the most recent use
    used_cnt: int = 0        # how many times the resource was handed out
    neon_sig: str = ''       # signature of the tx currently bound to it

    def __str__(self) -> str:
        return str(self.ident)

    def __hash__(self) -> int:
        return hash(self.ident)

    def __eq__(self, other) -> bool:
        if not isinstance(other, OpResUsedTime):
            return False
        return self.ident == other.ident

    def METHOD_NAME(self, value: int) -> None:
        # Deliberately bypass the frozen-dataclass protection: bump the
        # usage counter and record when the resource was last handed out.
        force_set = object.__setattr__
        force_set(self, 'used_cnt', self.used_cnt + 1)
        force_set(self, 'last_used_time', value)

    def set_neon_sig(self, value: str) -> None:
        assert len(value) > 0
        object.__setattr__(self, 'neon_sig', value)

    def reset_neon_sig(self) -> None:
        object.__setattr__(self, 'neon_sig', '')
class OpResMng:
    """Pool manager for operator resources.

    A resource ident moves between four containers:
      - ``_disabled_res_ident_list``: newly added / suspicious idents awaiting a recheck,
      - ``_checked_res_ident_set``: idents currently being rechecked,
      - ``_free_res_ident_list``: idents ready to be handed out,
      - ``_used_res_ident_dict``: neon tx signature -> resource currently in use.
    Every state transition reports pool sizes to the stat client via _commit_stat().
    """
    def __init__(self, config: Config, stat_client: ProxyStatClient):
        # Deduplicated private keys of all known resources (see init_resource_list).
        self._secret_list: List[bytes] = []
        # All currently configured resource idents.
        self._res_ident_set: Set[OpResIdent] = set()
        self._free_res_ident_list: Deque[OpResUsedTime] = deque()
        self._used_res_ident_dict: Dict[str, OpResUsedTime] = dict()
        self._disabled_res_ident_list: Deque[OpResIdent] = deque()
        self._checked_res_ident_set: Set[OpResIdent] = set()
        self._stat_client = stat_client
        self._config = config
        # Timestamp of the last sweep in _check_used_resource_list().
        self._last_check_time = 0
    def init_resource_list(self, res_ident_list: List[OpResIdent]) -> None:
        """Replace the configured resource set with *res_ident_list*.

        Removed idents are dropped from the free/disabled/checked containers
        (in-use entries are filtered lazily by _pop_used_resource); added
        idents start in the disabled list so they get rechecked first.
        """
        old_res_cnt = self.resource_cnt
        new_ident_set: Set[OpResIdent] = set(res_ident_list)
        rm_ident_set: Set[OpResIdent] = self._res_ident_set.difference(new_ident_set)
        add_ident_set: Set[OpResIdent] = new_ident_set.difference(self._res_ident_set)
        if (len(rm_ident_set) == 0) and (len(add_ident_set) == 0):
            LOG.debug(f'Same resource list')
            return
        self._res_ident_set = new_ident_set
        # NOTE: free list holds OpResUsedTime (compare res.ident), the other two hold bare idents.
        self._free_res_ident_list = deque([res for res in self._free_res_ident_list if res.ident not in rm_ident_set])
        self._disabled_res_ident_list = deque([res for res in self._disabled_res_ident_list if res not in rm_ident_set])
        self._checked_res_ident_set = {res for res in self._checked_res_ident_set if res not in rm_ident_set}
        for res in rm_ident_set:
            LOG.debug(f'Remove resource {res}')
        for res in add_ident_set:
            LOG.debug(f'Add resource {res}')
            self._disabled_res_ident_list.append(res)
        # Rebuild the deduplicated secret list from the new resource set.
        self._secret_list: List[bytes] = [pk for pk in {res.private_key for res in self._res_ident_set}]
        if old_res_cnt != self.resource_cnt != 0:
            LOG.debug(f'Change number of resources from {old_res_cnt} to {self.resource_cnt}')
        self._commit_stat()
    @property
    def resource_cnt(self) -> int:
        """Number of configured resource idents."""
        return len(self._res_ident_set)
    @staticmethod
    def _get_current_time() -> int:
        # Whole seconds, rounded up.
        return math.ceil(datetime.now().timestamp())
    def _get_resource_impl(self, neon_sig: str) -> Optional[OpResUsedTime]:
        """Return the resource bound to *neon_sig*, binding a free one if needed."""
        res_used_time = self._used_res_ident_dict.get(neon_sig, None)
        if res_used_time is not None:
            LOG.debug(f'Reuse resource {res_used_time} for tx {neon_sig}')
            return res_used_time
        if len(self._free_res_ident_list) > 0:
            res_used_time = self._free_res_ident_list.popleft()
            self._used_res_ident_dict[neon_sig] = res_used_time
            res_used_time.set_neon_sig(neon_sig)
            LOG.debug(f'Use resource {res_used_time} for tx {neon_sig}')
            self._commit_stat()
            return res_used_time
        return None
    def _pop_used_resource(self, neon_sig: str) -> Optional[OpResUsedTime]:
        """Unbind and return the resource for *neon_sig*; None if unknown or no longer configured."""
        res_used_time = self._used_res_ident_dict.pop(neon_sig, None)
        if (res_used_time is None) or (res_used_time.ident not in self._res_ident_set):
            LOG.debug(f'Skip resource {str(res_used_time)} for tx {neon_sig}')
            return None
        self._commit_stat()
        res_used_time.reset_neon_sig()
        return res_used_time
    def get_resource(self, neon_sig: str) -> Optional[OpResIdent]:
        """Acquire (or reuse) a resource for *neon_sig*, stamping its use time."""
        res_used_time = self._get_resource_impl(neon_sig)
        if res_used_time is None:
            return None
        now = self._get_current_time()
        res_used_time.METHOD_NAME(now)
        return res_used_time.ident
    def update_resource(self, neon_sig: str) -> None:
        """Refresh the last-used timestamp of the resource bound to *neon_sig*."""
        res_used_time = self._used_res_ident_dict.get(neon_sig, None)
        if res_used_time is not None:
            LOG.debug(f'Update time for resource {res_used_time}')
            now = self._get_current_time()
            res_used_time.METHOD_NAME(now)
    def release_resource(self, neon_sig: str) -> Optional[OpResIdent]:
        """Return the resource of *neon_sig* to the free pool, or queue it for recheck
        once it has been used more than recheck_resource_after_uses_cnt times."""
        res_used_time = self._pop_used_resource(neon_sig)
        if res_used_time is None:
            return None
        recheck_cnt = self._config.recheck_resource_after_uses_cnt
        if res_used_time.used_cnt > recheck_cnt:
            LOG.debug(f'Recheck resource {res_used_time} by counter')
            self._disabled_res_ident_list.append(res_used_time.ident)
        else:
            LOG.debug(f'Release resource {res_used_time}')
            self._free_res_ident_list.append(res_used_time)
        self._commit_stat()
        return res_used_time.ident
    def disable_resource(self, ident: OpResIdent) -> None:
        """Move *ident* out of the checked set into the disabled (recheck) queue."""
        LOG.debug(f'Disable resource {ident}')
        self._checked_res_ident_set.discard(ident)
        self._disabled_res_ident_list.append(ident)
        self._commit_stat()
    def enable_resource(self, ident: OpResIdent) -> None:
        """Mark a rechecked *ident* as usable; ignored if no longer configured.
        Note: the fresh OpResUsedTime resets its usage counter."""
        if ident not in self._res_ident_set:
            LOG.debug(f'Skip resource {ident}')
            return
        LOG.debug(f'Enable resource {ident}')
        self._checked_res_ident_set.discard(ident)
        self._free_res_ident_list.append(OpResUsedTime(ident=ident))
        self._commit_stat()
    def get_secret_list(self) -> List[bytes]:
        """Deduplicated private keys of all configured resources."""
        return self._secret_list
    def _check_used_resource_list(self) -> None:
        """Sweep stale in-use resources (idle longer than recheck_used_resource_sec)
        into the recheck queue. Rate-limited to one sweep per recheck interval."""
        now = self._get_current_time()
        recheck_sec = self._config.recheck_used_resource_sec
        check_time = now - recheck_sec
        if self._last_check_time > check_time:
            return
        self._last_check_time = now
        # list() because _pop_used_resource mutates the dict during iteration.
        for neon_sig, res_used_time in list(self._used_res_ident_dict.items()):
            if res_used_time.last_used_time > check_time:
                continue
            res_used_time = self._pop_used_resource(neon_sig)
            if res_used_time is None:
                continue
            LOG.debug(f'Recheck resource {res_used_time} by time usage')
            self._disabled_res_ident_list.append(res_used_time.ident)
    def get_disabled_resource(self) -> Optional[OpResIdent]:
        """Pop the next ident awaiting recheck and remember it as in-check."""
        if len(self._disabled_res_ident_list) == 0:
            return None
        ident = self._disabled_res_ident_list.popleft()
        LOG.debug(f'Recheck resource {ident}')
        self._checked_res_ident_set.add(ident)
        self._commit_stat()
        return ident
    def _commit_stat(self) -> None:
        """Push current pool sizes to the statistics client."""
        stat = NeonOpResStatData(
            secret_cnt=len(self._secret_list),
            total_res_cnt=len(self._res_ident_set),
            free_res_cnt=len(self._free_res_ident_list),
            used_res_cnt=len(self._used_res_ident_dict),
            disabled_res_cnt=len(self._disabled_res_ident_list)
        )
        self._stat_client.commit_op_res_stat(stat)
6,371 | clean prompt | import json
import re
from pydantic.types import List
from superagi.helper.token_counter import TokenCounter
from superagi.tools.base_tool import BaseTool
# Name of the pseudo-tool the agent invokes to signal that all objectives are done.
FINISH_NAME = "finish"
class AgentPromptBuilder:
    """Agent prompt builder for LLM agent.

    Class-level helpers that render goals, instructions, constraints, tools
    and task history into prompt placeholders such as {goals}, {tools},
    {task_instructions} and {task_history}.
    """

    @staticmethod
    def add_list_items_to_string(items: List[str]) -> str:
        """Render *items* as a numbered list, one "N. item" entry per line."""
        list_string = ""
        for i, item in enumerate(items):
            list_string += f"{i + 1}. {item}\n"
        return list_string

    @classmethod
    def add_tools_to_prompt(cls, tools: List[BaseTool], add_finish: bool = True) -> str:
        """Render the numbered tool list for the prompt.

        Args:
            tools (List[BaseTool]): The list of tools.
            add_finish (bool): Whether to append the pseudo "finish" tool.

        Returns:
            str: numbered tool descriptions, optionally ending with the
            FINISH_NAME entry that lets the agent signal completion.
        """
        # (debug print of the tool list removed)
        final_string = ""
        for i, item in enumerate(tools):
            final_string += f"{i + 1}. {cls._generate_tool_string(item)}\n"
        finish_description = (
            "use this to signal that you have finished all your objectives"
        )
        finish_args = (
            '"response": "final response to let '
            'people know you have finished your objectives"'
        )
        finish_string = (
            f"{len(tools) + 1}. \"{FINISH_NAME}\": "
            f"{finish_description}, args: {finish_args}"
        )
        if add_finish:
            final_string = final_string + finish_string + "\n\n"
        else:
            final_string = final_string + "\n"
        return final_string

    @classmethod
    def _generate_tool_string(cls, tool: BaseTool) -> str:
        """Return one tool's prompt line: name, description and its args JSON schema."""
        output = f"\"{tool.name}\": {tool.description}"
        output += f", args json schema: {json.dumps(tool.args)}"
        return output

    @classmethod
    def METHOD_NAME(cls, prompt):
        """Collapse runs of spaces/tabs to a single space and strip the ends."""
        prompt = re.sub('[ \t]+', ' ', prompt)
        return prompt.strip()

    @classmethod
    def replace_main_variables(cls, super_agi_prompt: str, goals: List[str], instructions: List[str], constraints: List[str],
                               tools: List[BaseTool], add_finish_tool: bool = True):
        """Replace the main variables in the super agi prompt.

        Args:
            super_agi_prompt (str): The super agi prompt.
            goals (List[str]): The list of goals.
            instructions (List[str]): The list of instructions.
            constraints (List[str]): The list of constraints.
            tools (List[BaseTool]): The list of tools.
            add_finish_tool (bool): Whether to add finish tool or not.
        """
        super_agi_prompt = super_agi_prompt.replace("{goals}", AgentPromptBuilder.add_list_items_to_string(goals))
        if len(instructions) > 0 and len(instructions[0]) > 0:
            # {instructions} gets a short header, {task_instructions} a verbose one.
            task_str = "INSTRUCTION(Follow these instruction to decide the flow of execution and decide the next steps for achieving the task):"
            super_agi_prompt = super_agi_prompt.replace("{instructions}", "INSTRUCTION: " + '\n' + AgentPromptBuilder.add_list_items_to_string(instructions))
            super_agi_prompt = super_agi_prompt.replace("{task_instructions}", task_str + '\n' + AgentPromptBuilder.add_list_items_to_string(instructions))
        else:
            super_agi_prompt = super_agi_prompt.replace("{instructions}", '')
            super_agi_prompt = super_agi_prompt.replace("{task_instructions}", "")
        super_agi_prompt = super_agi_prompt.replace("{constraints}",
                                                    AgentPromptBuilder.add_list_items_to_string(constraints))
        tools_string = AgentPromptBuilder.add_tools_to_prompt(tools, add_finish_tool)
        super_agi_prompt = super_agi_prompt.replace("{tools}", tools_string)
        return super_agi_prompt

    @classmethod
    def replace_task_based_variables(cls, super_agi_prompt: str, current_task: str, last_task: str,
                                     last_task_result: str, pending_tasks: List[str], completed_tasks: list, token_limit: int):
        """Replace the task based variables in the super agi prompt.

        Args:
            super_agi_prompt (str): The super agi prompt.
            current_task (str): The current task.
            last_task (str): The last task.
            last_task_result (str): The last task result.
            pending_tasks (List[str]): The list of pending tasks.
            completed_tasks (list): The list of completed tasks (mutated: reversed in place).
            token_limit (int): The token limit.
        """
        if "{current_task}" in super_agi_prompt:
            super_agi_prompt = super_agi_prompt.replace("{current_task}", current_task)
        if "{last_task}" in super_agi_prompt:
            super_agi_prompt = super_agi_prompt.replace("{last_task}", last_task)
        if "{last_task_result}" in super_agi_prompt:
            super_agi_prompt = super_agi_prompt.replace("{last_task_result}", last_task_result)
        if "{pending_tasks}" in super_agi_prompt:
            super_agi_prompt = super_agi_prompt.replace("{pending_tasks}", str(pending_tasks))
        # NOTE: reverses the caller's list in place (kept for compatibility).
        completed_tasks.reverse()
        if "{completed_tasks}" in super_agi_prompt:
            completed_tasks_arr = []
            for task in completed_tasks:
                completed_tasks_arr.append(task['task'])
            super_agi_prompt = super_agi_prompt.replace("{completed_tasks}", str(completed_tasks_arr))
        base_token_limit = TokenCounter.count_message_tokens([{"role": "user", "content": super_agi_prompt}])
        pending_tokens = token_limit - base_token_limit
        final_output = ""
        if "{task_history}" in super_agi_prompt:
            # Fill history from the most recent of the last 10 tasks until the budget is hit.
            for task in reversed(completed_tasks[-10:]):
                final_output = f"Task: {task['task']}\nResult: {task['response']}\n" + final_output
                token_count = TokenCounter.count_message_tokens([{"role": "user", "content": final_output}])
                # giving buffer of 100 tokens
                if token_count > min(600, pending_tokens):
                    break
            super_agi_prompt = super_agi_prompt.replace("{task_history}", "\n" + final_output + "\n")
        return super_agi_prompt
6,372 | test connection is allowed | import pytest
from gaphor import UML
from gaphor.diagram.connectors import Connector
from gaphor.diagram.tests.fixtures import allow, connect, disconnect
from gaphor.SysML import sysml
from gaphor.SysML.blocks.block import BlockItem
from gaphor.SysML.blocks.connectors import BlockProperyProxyPortConnector
from gaphor.SysML.blocks.proxyport import ProxyPortItem
from gaphor.SysML.blocks.property import PropertyItem
from gaphor.UML.deployments import ConnectorItem
@pytest.fixture
def block_item(diagram, element_factory):
    """Fixture: a BlockItem backed by a freshly created sysml.Block subject."""
    return diagram.create(BlockItem, subject=element_factory.create(sysml.Block))
@pytest.fixture
def property_item(diagram, element_factory):
    """Fixture: a PropertyItem whose subject is typed by a fresh Block."""
    # NOTE(review): "type" shadows the builtin; rename candidate.
    type = element_factory.create(sysml.Block)
    prop = diagram.create(PropertyItem, subject=element_factory.create(sysml.Property))
    prop.subject.type = type
    return prop
@pytest.fixture
def proxy_port_item(diagram):
    """Fixture: a ProxyPortItem with no subject and no connection yet."""
    return diagram.create(ProxyPortItem)
def connected_proxy_port_item(diagram, element_factory):
    """Helper: create a ProxyPortItem already connected to a fresh BlockItem."""
    proxy_port_item = diagram.create(ProxyPortItem)
    block_item = diagram.create(BlockItem, subject=element_factory.create(sysml.Block))
    connector = Connector(block_item, proxy_port_item)
    connector.connect(proxy_port_item.handles()[0], block_item.ports()[0])
    return proxy_port_item
@pytest.fixture
def head_proxy_port_item(diagram, element_factory):
    """Fixture: a block-connected proxy port used as a connector's head end."""
    return connected_proxy_port_item(diagram, element_factory)
@pytest.fixture
def tail_proxy_port_item(diagram, element_factory):
    """Fixture: a block-connected proxy port used as a connector's tail end."""
    return connected_proxy_port_item(diagram, element_factory)
@pytest.fixture
def other_proxy_port_item(diagram, element_factory):
    """Fixture: an additional block-connected proxy port."""
    return connected_proxy_port_item(diagram, element_factory)
@pytest.fixture
def connector_item(diagram):
    """Fixture: an unconnected ConnectorItem."""
    return diagram.create(ConnectorItem)
def METHOD_NAME(block_item, proxy_port_item):
    """A proxy port may be connected to a block (via BlockProperyProxyPortConnector)."""
    connector = Connector(block_item, proxy_port_item)
    assert isinstance(connector, BlockProperyProxyPortConnector)
    assert connector.allow(proxy_port_item.handles()[0], block_item.ports()[0])
def test_connect_proxy_port_to_block(block_item, proxy_port_item):
    """Connecting a proxy port to a block creates an owned Port on the block."""
    connector = Connector(block_item, proxy_port_item)
    connected = connector.connect(proxy_port_item.handles()[0], block_item.ports()[0])
    assert connected
    assert proxy_port_item.subject
    assert proxy_port_item.subject.encapsulatedClassifier is block_item.subject
    assert proxy_port_item.subject in block_item.subject.ownedPort
def test_disconnect_proxy_port_to_block(block_item, proxy_port_item):
    """Disconnecting removes the port's subject but keeps the item on the diagram."""
    connector = Connector(block_item, proxy_port_item)
    connector.connect(proxy_port_item.handles()[0], block_item.ports()[0])
    connector.disconnect(proxy_port_item.handles()[0])
    assert proxy_port_item.subject is None
    assert proxy_port_item.diagram
def test_connect_proxy_port_to_property(property_item, proxy_port_item):
    """Connecting to a typed property attaches the port to the property's type."""
    connector = Connector(property_item, proxy_port_item)
    connected = connector.connect(
        proxy_port_item.handles()[0], property_item.ports()[0]
    )
    assert connected
    assert proxy_port_item.subject
    assert proxy_port_item.subject.encapsulatedClassifier is property_item.subject.type
    assert proxy_port_item.subject in property_item.subject.type.ownedPort
def test_allow_connector_to_proxy_port(
    connector_item: ConnectorItem, head_proxy_port_item: ProxyPortItem
):
    """A ConnectorItem end may be glued to a proxy port."""
    assert allow(connector_item, connector_item.handles()[0], head_proxy_port_item)
def test_connect_connector_on_one_end_to_proxy_port(
    connector_item: ConnectorItem, head_proxy_port_item: ProxyPortItem
):
    """With only one end connected, no UML.Connector subject is created yet."""
    connect(connector_item, connector_item.handles()[0], head_proxy_port_item)
    assert connector_item.subject is None
def test_connect_connector_on_both_ends_to_proxy_port(
    connector_item: ConnectorItem,
    head_proxy_port_item: ProxyPortItem,
    tail_proxy_port_item: ProxyPortItem,
):
    """With both ends connected, a subject is created with both ports as end roles."""
    connect(connector_item, connector_item.handles()[0], head_proxy_port_item)
    connect(connector_item, connector_item.handles()[1], tail_proxy_port_item)
    assert connector_item.subject
    assert head_proxy_port_item.subject in connector_item.subject.end[:].role
    assert tail_proxy_port_item.subject in connector_item.subject.end[:].role
def test_disconnect_connector_from_proxy_port(
    connector_item: ConnectorItem,
    head_proxy_port_item: ProxyPortItem,
    tail_proxy_port_item: ProxyPortItem,
    element_factory,
):
    """Disconnecting one end destroys the Connector/ConnectorEnds but keeps the Ports."""
    connect(connector_item, connector_item.handles()[0], head_proxy_port_item)
    connect(connector_item, connector_item.handles()[1], tail_proxy_port_item)
    disconnect(connector_item, connector_item.handles()[0])
    assert not connector_item.subject
    assert element_factory.lselect(UML.Connector) == []
    assert element_factory.lselect(UML.ConnectorEnd) == []
    assert head_proxy_port_item.subject in element_factory.select(UML.Port)
    assert tail_proxy_port_item.subject in element_factory.select(UML.Port)
6,373 | instantiate urban observatory wind data | ###########################################
# Authors: Toby Latcham (tjl47@cam.ac.uk) #
# Sophie Hall (sh2000@cam.ac.uk) #
# Date: 11 Feb 2022 #
###########################################
import uuid
from owlready2 import *
# Data Reader and data retrieval modules
from Utils.data_reader_module import *
from Utils.urban_observatory_data_retrieval_module import *
# Instantiation modules
from Utils.urban_observatory_wind_module import *
# Get the JVM module view (via jpsBaseLibGateWay instance) from the jpsSingletons module to access
# the TimeSeriesClient in the JPB_BASE_LIB
from Utils.jpsSingletons import jpsBaseLibView
# Get settings and functions from utils module
import Utils.utils as utils
#def initialise_knowledge_graph():
#'''Initialises the knowledge graph.'''
# Create specified PostgreSQL database
#utils.create_postgres_db()
# Create specified Blazegraph namespace
#utils.create_blazegraph_namespace()
def extract_data():
    '''Extract all desired data from Newcastle Observatory API

    Returns:
        tuple: (wind_speed_response, units) where units maps the time-series
        name to its unit string (here only mean wind speed in m/s).
    '''
    units = {'WindSpeed': 'm/s'}
    # Fixed one-day window at 2-minute resolution; presumably aligned with the
    # Urban Observatory API parameters — TODO confirm against retrieve_data().
    wind_speed_response = retrieve_data('Wind Speed', 'Wind Speed', datetime.datetime(2022, 1, 10, 0), datetime.datetime(2022, 1, 11), '2mins')
    return wind_speed_response, units
def process_data(wind_speed_response):
    '''Processes the data and returns it in the proper format.

    Writes the raw wind speed data to newcastle_data_wind.csv as a side effect
    and returns the list of formatted sensor dicts.
    '''
    wind_sensor_names = process_wind_speed_data(wind_speed_response, 'newcastle_data_wind.csv')
    wind_sensors = format_wind_sensors(wind_sensor_names)
    return wind_sensors
def instantiate_newcastle_sensors(wind_sensors, units):
    '''Instantiates all extracted Newcastle Observatory sensors

    Sets up one remote KG client and the Java time/data classes once, then
    instantiates each sensor in turn.
    '''
    # Initialise remote KG client with query AND update endpoints specified
    KGClient = jpsBaseLibView.RemoteStoreClient(utils.QUERY_ENDPOINT, utils.UPDATE_ENDPOINT)
    # Retrieve Java classes for time entries (Instant) and data (ALL Double)
    # (required for time series client instantiation)
    Instant = jpsBaseLibView.java.time.Instant
    instant_class = Instant.now().getClass()
    double_class = jpsBaseLibView.java.lang.Double.TYPE
    # Loop over all sensors
    for sensor in wind_sensors:
        instantiate_wind_sensor(sensor, units, KGClient, instant_class, double_class)
def instantiate_wind_sensor(sensor, units, KGClient, instant_class, double_class):
    '''Instantiates a given wind sensor.

    Performs three steps: (1) SPARQL INSERT of the static sensor triples,
    (2) time-series registration via the Java TimeSeriesClient, and
    (3) upload of the actual time-series values.
    '''
    print('Current sensor: ', sensor['sensor name'])
    # First (and presumably only) time-series key of this sensor — TODO confirm.
    ts = list(sensor['timeseries'].keys())[0]
    # Create IRI for current sensor
    sensorIRI = utils.PREFIXES['ssn'] + 'Sensor_' + str(uuid.uuid4())
    # Create IRI for time series
    dataIRI = utils.PREFIXES['ssn'] + ts + '_' + str(uuid.uuid4())
    # Initialise list of dataIRIs, which will be represented as time series
    dataIRIs = [dataIRI]
    # 1) Perform SPARQL update for non-time series related triples (i.e. without TimeSeriesClient)
    # NOTE(review): the lat-lon literal opens its quote in the template ("%s .)
    # and relies on the substituted value to supply the closing quote and
    # datatype suffix — fragile; verify the generated SPARQL is well-formed.
    query = utils.create_sparql_prefix('ontoweather') + \
            utils.create_sparql_prefix('rdf') + \
            utils.create_sparql_prefix('ssn') + \
            utils.create_sparql_prefix('om') + \
            utils.create_sparql_prefix('geolit') + \
            '''INSERT DATA { \
            <%s> rdf:type ssn:Sensor ; \
                 ontoweather:hasName "%s" ; \
                 ontoweather:observesWindProperty <%s> ; \
                 ontoweather:WGS84LatitudeLongitude "%s . \
            <%s> rdf:type ontoweather:MeanWindSpeed ; \
                 ontoweather:hasTimeseries "%s" ; \
                 ontoweather:hasDescription "%s" ; \
                 om:hasUnit "%s" . }''' % (sensorIRI,
                                           sensor['sensor name'],
                                           dataIRI,
                                           sensor['lat'] + '#' + sensor['lon'] + '\"^^geolit:lat-lon',
                                           dataIRI,
                                           ts + ' measurement',
                                           "INSERT DESCRIPTION HERE",
                                           units[ts])
    KGClient.executeUpdate(query)
    print("Triples independent of Java TimeSeriesClient successfully instantiated.")
    # 2) Perform SPARQL update for time series related triples (i.e. via TimeSeriesClient)
    # Initialise time series in both KG and RDB using TimeSeriesClass
    TSClient = jpsBaseLibView.TimeSeriesClient(instant_class, utils.PROPERTIES_FILE)
    TSClient.initTimeSeries(dataIRIs, [double_class] * len(dataIRIs), utils.FORMAT)
    print("Time series triples via Java TimeSeriesClient successfully instantiated.")
    # 3) Add actual time series data
    # Create Java TimeSeries object with data to attach
    times = sensor['times']
    values = [sensor['timeseries'][v] for v in list(sensor['timeseries'].keys())]
    timeseries = jpsBaseLibView.TimeSeries(times, dataIRIs, values)
    # Add data
    TSClient.addTimeSeriesData(timeseries)
    print("Time series data successfully added.\n")
def METHOD_NAME():
    '''Extracts and instantiates all desired Newcastle Observatory data

    Full pipeline: fetch from the API, reformat, then write sensors and
    time series into the knowledge graph.
    '''
    wind_speed_response, units = extract_data()
    wind_sensors = process_data(wind_speed_response)
    instantiate_newcastle_sensors(wind_sensors, units)
    print("All sensors successfully instantiated")
if __name__ == '__main__':
    # Entry point: run the full extract/process/instantiate pipeline.
    # (restored the closing parenthesis lost at the end of the file)
    METHOD_NAME()
6,374 | to phone | # Copyright (c) Alibaba, Inc. and its affiliates.
import os
import random
from pathlib import Path
from typing import Any, Dict
import librosa
import soundfile as sf
import torch
from fairseq.data.audio.feature_transforms import \
CompositeAudioFeatureTransform
from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig
from modelscope.utils.chinese_utils import pre_chinese
from modelscope.utils.constant import ModeKeys
from .base import OfaBasePreprocessor
from .utils.text2phone import Text2Phone
class OfaASRPreprocessor(OfaBasePreprocessor):
    """Preprocessor for OFA automatic speech recognition.

    Builds fbank features from raw audio plus (in training) tokenized text
    targets and their phone-id sequences.
    """

    def __init__(self,
                 cfg,
                 model_dir,
                 mode=ModeKeys.INFERENCE,
                 *args,
                 **kwargs):
        """preprocess the data

        Args:
            cfg(modelscope.utils.config.ConfigDict) : model config
            model_dir (str): model path,
            mode: preprocessor mode (model mode)
        """
        super(OfaASRPreprocessor, self).__init__(cfg, model_dir, mode, *args,
                                                 **kwargs)
        # Initialize fbank feature transforms for train/test from the model's config.
        self.data_cfg = S2TDataConfig(
            Path(os.path.join(model_dir, 'fbank_config.yaml')))
        self.train_audio_feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
            self.data_cfg.get_feature_transforms('train', True))
        self.test_audio_feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
            self.data_cfg.get_feature_transforms('test', False))
        # Text -> phone conversion and the phone <-> id vocabulary.
        self.text2phone_tokenizer = Text2Phone(
            os.path.join(model_dir, 'text2phone_dict.txt'))
        self.phone_to_id, self.id_to_phone = self.build_phone_dict(
            os.path.join(model_dir, 'phone_dict.txt'))

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch to the train or inference sample builder based on mode."""
        if self.mode == ModeKeys.TRAIN:
            return self._build_train_sample(data)
        else:
            return self._build_infer_sample(data)

    def _build_train_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Build a training sample: fbank features, token targets and phone ids."""
        # Random speed perturbation as data augmentation.
        speed = random.choice([0.9, 1.0, 1.1])
        audio_bytes = self.get_audio_bytes(data[self.column_map['wav']])
        wav, sr = librosa.load(audio_bytes, 16000, mono=True)
        fbank = self.prepare_fbank(
            torch.tensor([wav], dtype=torch.float32),
            sr,
            speed,
            target_sample_rate=16000,
            is_train=True)
        fbank_mask = torch.tensor([True])
        sample = {
            'fbank': fbank,
            'fbank_mask': fbank_mask,
            'label': data[self.column_map['text']]
        }
        target = sample['label']
        if self.language == 'zh':
            target = pre_chinese(target, self.max_tgt_length)
            sample['target'] = self.tokenize_text(target, add_bos=False)
        else:
            target = target.translate(self.transtab).strip()
            target_token_list = target.strip().split()
            target = ' '.join(target_token_list[:self.max_tgt_length])
            sample['target'] = self.tokenize_text(target, add_bos=False)
        # Phone ids are shifted: +1 for the target, a further +3 for the input
        # item — presumably to skip special-token ids; TODO confirm offsets.
        phone_item = self.METHOD_NAME(target) + 1
        phone_mask = torch.tensor([False])
        sample['phone_item'] = phone_item + 3
        sample['phone_target'] = phone_item
        sample['phone_mask'] = phone_mask
        # Teacher forcing: BOS followed by the target shifted right by one.
        sample['prev_output_tokens'] = torch.cat(
            [self.bos_item, sample['target'][:-1]])
        return sample

    def _build_infer_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Build an inference sample: fbank features (no speed perturbation)."""
        speed = 1.0
        audio_bytes = self.get_audio_bytes(data[self.column_map['wav']])
        wav, sr = librosa.load(audio_bytes, 16000, mono=True)
        fbank = self.prepare_fbank(
            torch.tensor([wav], dtype=torch.float32),
            sr,
            speed,
            target_sample_rate=16000,
            is_train=False)
        fbank_mask = torch.tensor([True])
        sample = {'fbank': fbank, 'fbank_mask': fbank_mask}
        if 'text' in self.column_map and self.column_map['text'] in data:
            sample['label'] = data[self.column_map['text']]
        # mock phone inputs; not used for inference
        sample['phone_item'] = torch.tensor([6, 6, 6])
        sample['phone_mask'] = torch.tensor([False])
        return sample

    def METHOD_NAME(self, text):
        """Convert *text* to a tensor of phone ids via the text2phone tokenizer."""
        phones = self.text2phone_tokenizer.trans(text)
        ids = torch.tensor([self.phone_to_id[x] for x in phones.split(' ')])
        return ids

    def build_phone_dict(self, phone_dict_path):
        """Read the phone vocabulary file (one phone per line, first field).

        Returns:
            tuple: (phone_to_id, id_to_phone) dicts mapping phone<->line index.
        """
        phone_to_id = dict()
        id_to_phone = dict()
        with open(phone_dict_path, 'r') as phone_dict_file:
            for i, line in enumerate(phone_dict_file):
                phone = line.strip().split(' ')[0]
                phone_to_id[phone] = i
                # FIX: was `id_to_phone[i] = phone_to_id`, which mapped every
                # id to the whole dict instead of the phone string.
                id_to_phone[i] = phone
        return phone_to_id, id_to_phone
6,375 | merge dict | #!/usr/bin/env python3
from __future__ import print_function
import argparse
import boto3
import botocore
import glob
import os
import requests
import yaml
import io
from shutil import copyfile
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
def METHOD_NAME(source, target):
    """Return a shallow copy of *source* recursively overlaid with *target*.

    Keys whose values are mappings on both sides are merged recursively;
    any other key is overwritten by the value from *target*. Neither input
    mapping is mutated at the top level.
    """
    merged = source.copy()
    for key, value in target.items():
        existing = merged.get(key)
        if isinstance(existing, Mapping) and isinstance(value, Mapping):
            merged[key] = METHOD_NAME(existing, value)
        else:
            merged[key] = value
    return merged
def fetch_http(source):
    """Download one YAML config over HTTP(S).

    Returns a single-element list of dicts {src, contents} to match the
    other fetch_* helpers. Raises requests.HTTPError on any non-200 status.
    """
    configs = []
    res = requests.get(source)
    if res.status_code != 200:
        raise requests.HTTPError("Could not get %s, Response code: %s, Reason: %s" % (source, res.status_code, res.reason))
    configs.append(dict(src=source, contents=res.text))
    return configs
def fetch_s3(source):
    """Download one YAML config from S3 ('s3://bucket/key' or 'bucket/key').

    Returns a single-element list of dicts {src, contents}; here contents is
    bytes (vs str from fetch_http/fetch_files) — yaml.safe_load accepts both.
    Missing objects are logged and the ClientError re-raised.
    """
    configs = []
    if source.startswith('s3://'):
        source = source.replace('s3://', '')
    s3 = boto3.resource('s3')
    # First path component is the bucket, the rest is the object key.
    bucket_name = source.split('/')[0]
    s3_key = '/'.join(source.split('/')[1:])
    try:
        outbuff = io.BytesIO()
        s3.Bucket(bucket_name).download_fileobj(s3_key, outbuff)
        data = outbuff.getvalue()
        configs.append(dict(src=source, contents=data))
        outbuff.close()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object %s does not exist in bucket: %s" % (s3_key, bucket_name))
            raise
        else:
            raise
    return configs
def fetch_files(source):
    """Read YAML configs from the local filesystem.

    *source* may be a 'file://' URI, a glob pattern, a directory (all files
    directly inside, non-recursive) or a single file. Matches are ordered by
    absolute path. Returns a list of dicts {src, contents}.
    """
    if source.startswith('file://'):
        source = source.replace('file://', '')
    source = os.path.expanduser(source)
    matched = []
    if '*' in source:
        candidates = sorted(glob.glob(source), key=os.path.abspath)
        matched.extend(c for c in candidates if os.path.isfile(c))
    elif os.path.isdir(source):
        candidates = sorted(
            (os.path.join(source, name) for name in os.listdir(source)),
            key=os.path.abspath)
        matched.extend(c for c in candidates if os.path.isfile(c))
    elif os.path.isfile(source):
        matched.append(source)
    configs = []
    for path in matched:
        with open(path, 'r') as handle:
            configs.append(dict(src=path, contents=handle.read()))
    return configs
def fetch_merged_config(source):
    """Fetch every URI in the comma-separated *source* and deep-merge the YAML.

    Later sources override earlier ones (via METHOD_NAME). Empty entries and
    empty YAML documents are skipped; non-mapping YAML raises ValueError.
    URIs with an unknown scheme are silently ignored.
    """
    sources = source.split(',')
    raw_configs = []
    for src in sources:
        if not src:
            continue
        if src.startswith('file://'):
            raw_configs += fetch_files(src)
        elif src.startswith('s3://'):
            raw_configs += fetch_s3(src)
        elif src.startswith('http://') or src.startswith('https://'):
            raw_configs += fetch_http(src)
    config = {}
    for rc in raw_configs:
        c = yaml.safe_load(rc['contents'])
        if c is None:
            continue
        elif not isinstance(c, Mapping):
            raise ValueError("Invalid Yaml content: %s" % rc['src'])
        config = METHOD_NAME(config, c)
    return config
def main():
    """CLI entry point: merge configs from --source and print or write to --out."""
    parser = argparse.ArgumentParser(description='Fetch configuration yaml from different data sources and merge them into one config file')
    parser.add_argument('--source', type=str, help='comma delimited source URIs')
    parser.add_argument('--out', type=str, help='Output config file')
    args = parser.parse_args()
    if not args.source:
        exit(parser.print_usage())
    config = fetch_merged_config(args.source)
    out = args.out
    if not out:
        # No output file: dump the merged config to stdout.
        print(yaml.safe_dump(config, default_flow_style=False))
    else:
        out = os.path.expanduser(out)
        parent_dir = os.path.dirname(out)
        if parent_dir and not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)
        with open(out, 'w') as f:
            # An empty merged config leaves the output file empty.
            if config:
                yaml.safe_dump(config, stream=f, default_flow_style=False)
if __name__ == '__main__':
    main()
6,376 | print free text | from enum import Enum
from typing import Optional
from strictdoc.backend.sdoc.models.anchor import Anchor
from strictdoc.backend.sdoc.models.document import Document
from strictdoc.backend.sdoc.models.inline_link import InlineLink
from strictdoc.backend.sdoc.models.requirement import Requirement
from strictdoc.backend.sdoc.models.section import FreeText, Section
from strictdoc.core.document_iterator import DocumentCachingIterator
from strictdoc.core.traceability_index import TraceabilityIndex
from strictdoc.export.rst.rst_templates import RSTTemplates
class TAG(Enum):
    """Node kinds distinguished by the RST writer."""
    SECTION = 1
    REQUIREMENT = 2
    COMPOSITE_REQUIREMENT = 3
class RSTWriter:
    """Serializes an SDoc document tree to reStructuredText."""
    def __init__(self, index: TraceabilityIndex):
        # Traceability index used to resolve anchors and requirement links.
        self.index = index
    def write(self, document: Document, single_document: bool) -> str:
        """Render *document* to an RST string.

        When not exporting as a single standalone document, the document title
        (with an optional :ref: label from the document UID) and the document's
        free text are emitted first.
        """
        document_iterator = DocumentCachingIterator(document)
        output = ""
        if not single_document:
            document_uid: Optional[str] = None
            if document.config.uid is not None:
                document_uid = document.config.uid
            output += self._print_rst_header(
                document.title, 0, reference_uid=document_uid
            )
            for free_text in document.free_texts:
                output += self.METHOD_NAME(free_text)
        for content_node in document_iterator.all_content():
            if isinstance(content_node, Section):
                output += self._print_rst_header(
                    content_node.title,
                    content_node.ng_level,
                    content_node.reserved_uid,
                )
                for free_text in content_node.free_texts:
                    output += self.METHOD_NAME(free_text)
            elif isinstance(content_node, Requirement):
                output += self._print_requirement_fields(content_node)
        # Collapse a trailing double newline to a single one.
        if output.endswith("\n\n"):
            output = output[:-1]
        return output.lstrip()
    @staticmethod
    def _print_rst_header_2(string: str, level: int):
        """Render an RST title with an underline chosen by *level* (no label)."""
        assert isinstance(string, str), string
        assert isinstance(level, int), level
        # Underline characters per heading level (0 is the document title).
        chars = {
            0: "$",
            1: "=",
            2: "-",
            3: "~",
            4: "^",
            5: '"',
            6: "#",
            7: "'",
        }
        header_char = chars[level]
        output = ""
        output += string
        output += "\n"
        output += header_char.rjust(len(string), header_char)
        return output
    @staticmethod
    def _print_rst_header(
        string: str, level: int, reference_uid: Optional[str]
    ):
        """Render an RST title with an underline and an optional ``.. _UID:`` label."""
        assert isinstance(string, str), string
        assert isinstance(level, int), level
        chars = {
            0: "$",
            1: "=",
            2: "-",
            3: "~",
            4: "^",
            5: '"',
            6: "#",
            7: "'",
        }
        header_char = chars[level]
        output = ""
        # An RST reference looks like this:
        # .. _SDOC-HIGH-VALIDATION:
        if reference_uid is not None:
            assert len(reference_uid) > 0, reference_uid
            output += f".. _{reference_uid}:\n\n"
        output += string
        output += "\n"
        output += header_char.rjust(len(string), header_char)
        output += "\n\n"
        return output
    def _print_requirement_fields(self, section_content: Requirement):
        """Render a requirement through the requirement.jinja.rst template."""
        requirement_template = RSTTemplates.jinja_environment.get_template(
            "requirement.jinja.rst"
        )
        output = requirement_template.render(
            requirement=section_content,
            index=self.index,
            _print_rst_header=self._print_rst_header_2,
        )
        return output
    def METHOD_NAME(self, free_text):
        """Render a FreeText node: plain strings, inline :ref: links and anchors."""
        assert isinstance(free_text, FreeText)
        if len(free_text.parts) == 0:
            return ""
        output = ""
        for part in free_text.parts:
            if isinstance(part, str):
                output += part
            elif isinstance(part, InlineLink):
                anchor_or_none = self.index.get_anchor_by_uid_weak(part.link)
                # Labels that aren’t placed before a section title can still be
                # referenced, but you must give the link an explicit title,
                # using this syntax: :ref:`Link title <label-name>`.
                # https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html
                if anchor_or_none:
                    anchor_text = (
                        anchor_or_none.title
                        if anchor_or_none.title is not None
                        else anchor_or_none.value
                    )
                    output += f":ref:`{anchor_text} <{part.link}>`"
                else:
                    output += f":ref:`{part.link}`"
            elif isinstance(part, Anchor):
                output += f".. _{part.value}:\n"
            else:
                raise NotImplementedError
        output += "\n"
        return output
6,377 | on value decode error | import abc
import asyncio
import typing
from typing import Any, AsyncIterator, Awaitable, Generic, Optional, Set, TypeVar
from mode import Seconds
from mode.utils.futures import stampede
from mode.utils.queues import ThrowableQueue
from .codecs import CodecArg
from .core import HeadersArg, K, V
from .tuples import TP, FutureMessage, Message, MessageSentCallback, RecordMetadata
_T = TypeVar("_T")
# Contravariant type variable kept for consumers of this module.
_T_contra = TypeVar("_T_contra", contravariant=True)
# Import the real types only for static type checking; at runtime, provide
# empty stand-in classes to avoid circular imports.
if typing.TYPE_CHECKING:
    from .app import AppT as _AppT
    from .events import EventT as _EventT
    from .models import ModelArg as _ModelArg
    from .serializers import SchemaT as _SchemaT
    from .streams import StreamT as _StreamT
else:
    class _AppT:
        ...  # noqa
    class _EventT(Generic[_T]):
        ...  # noqa
    class _ModelArg:
        ...  # noqa
    class _SchemaT:
        ...  # noqa
    class _StreamT:
        ...  # noqa
class ChannelT(AsyncIterator[_EventT[_T]]):
    """Abstract interface for a channel.

    A channel is an async iterator of events that also supports sending
    messages, cloning into subscriber iterators, and (de)serialization of
    keys/values via an optional schema.
    """

    # Declared attributes every concrete channel must carry.
    app: _AppT
    schema: _SchemaT
    key_type: Optional[_ModelArg]
    value_type: Optional[_ModelArg]
    loop: Optional[asyncio.AbstractEventLoop]
    maxsize: Optional[int]
    active_partitions: Optional[Set[TP]]

    # --- construction / cloning -------------------------------------------
    @abc.abstractmethod
    def __init__(
        self,
        app: _AppT,
        *,
        schema: Optional[_SchemaT] = None,
        key_type: _ModelArg = None,
        value_type: _ModelArg = None,
        is_iterator: bool = False,
        queue: Optional[ThrowableQueue] = None,
        maxsize: Optional[int] = None,
        root: Optional["ChannelT"] = None,
        active_partitions: Optional[Set[TP]] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        ...
    @abc.abstractmethod
    def clone(
        self, *, is_iterator: Optional[bool] = None, **kwargs: Any
    ) -> "ChannelT[_T]":
        ...
    @abc.abstractmethod
    def clone_using_queue(self, queue: asyncio.Queue) -> "ChannelT[_T]":
        ...
    @abc.abstractmethod
    def stream(self, **kwargs: Any) -> "_StreamT[_T]":
        ...
    @abc.abstractmethod
    def get_topic_name(self) -> str:
        ...
    # --- producing --------------------------------------------------------
    @abc.abstractmethod
    async def send(
        self,
        *,
        key: K = None,
        value: V = None,
        partition: Optional[int] = None,
        timestamp: Optional[float] = None,
        headers: HeadersArg = None,
        schema: Optional[_SchemaT] = None,
        key_serializer: CodecArg = None,
        value_serializer: CodecArg = None,
        callback: Optional[MessageSentCallback] = None,
        force: bool = False,
    ) -> Awaitable[RecordMetadata]:
        ...
    @abc.abstractmethod
    def send_soon(
        self,
        *,
        key: K = None,
        value: V = None,
        partition: Optional[int] = None,
        timestamp: Optional[float] = None,
        headers: HeadersArg = None,
        schema: Optional[_SchemaT] = None,
        key_serializer: CodecArg = None,
        value_serializer: CodecArg = None,
        callback: Optional[MessageSentCallback] = None,
        force: bool = False,
        eager_partitioning: bool = False,
    ) -> FutureMessage:
        ...
    @abc.abstractmethod
    def as_future_message(
        self,
        key: K = None,
        value: V = None,
        partition: Optional[int] = None,
        timestamp: Optional[float] = None,
        headers: HeadersArg = None,
        schema: Optional[_SchemaT] = None,
        key_serializer: CodecArg = None,
        value_serializer: CodecArg = None,
        callback: Optional[MessageSentCallback] = None,
        eager_partitioning: bool = False,
    ) -> FutureMessage:
        ...
    @abc.abstractmethod
    async def publish_message(
        self, fut: FutureMessage, wait: bool = True
    ) -> Awaitable[RecordMetadata]:
        ...
    # --- declaration ------------------------------------------------------
    # ``stampede`` collapses concurrent callers into a single execution.
    @stampede
    @abc.abstractmethod
    async def maybe_declare(self) -> None:
        ...
    @abc.abstractmethod
    async def declare(self) -> None:
        ...
    # --- (de)serialization ------------------------------------------------
    @abc.abstractmethod
    def prepare_key(
        self, key: K, key_serializer: CodecArg, schema: Optional[_SchemaT] = None
    ) -> Any:
        ...
    @abc.abstractmethod
    def prepare_value(
        self, value: V, value_serializer: CodecArg, schema: Optional[_SchemaT] = None
    ) -> Any:
        ...
    @abc.abstractmethod
    async def decode(self, message: Message, *, propagate: bool = False) -> _EventT[_T]:
        ...
    # --- consuming --------------------------------------------------------
    @abc.abstractmethod
    async def deliver(self, message: Message) -> None:
        ...
    @abc.abstractmethod
    async def put(self, value: _EventT[_T]) -> None:
        ...
    @abc.abstractmethod
    async def get(self, *, timeout: Optional[Seconds] = None) -> _EventT[_T]:
        ...
    @abc.abstractmethod
    def empty(self) -> bool:
        ...
    # --- error hooks ------------------------------------------------------
    @abc.abstractmethod
    async def on_key_decode_error(self, exc: Exception, message: Message) -> None:
        ...
    @abc.abstractmethod
    async def METHOD_NAME(self, exc: Exception, message: Message) -> None:
        ...
    @abc.abstractmethod
    async def on_decode_error(self, exc: Exception, message: Message) -> None:
        ...
    @abc.abstractmethod
    def on_stop_iteration(self) -> None:
        ...
    # --- async iteration --------------------------------------------------
    @abc.abstractmethod
    def __aiter__(self) -> "ChannelT":
        ...
    @abc.abstractmethod
    def __anext__(self) -> Awaitable[_EventT[_T]]:
        ...
    @abc.abstractmethod
    async def throw(self, exc: BaseException) -> None:
        ...
    @abc.abstractmethod
    def _throw(self, exc: BaseException) -> None:
        ...
    @abc.abstractmethod
    def derive(self, **kwargs: Any) -> "ChannelT":
        ...
    @property
    @abc.abstractmethod
    def subscriber_count(self) -> int:
        ...
    @property
    @abc.abstractmethod
    def queue(self) -> ThrowableQueue:
        ...
6,378 | execute | import triton_python_backend_utils as pb_utils
from torch.utils.dlpack import to_dlpack
import torch
import numpy as np
import kaldifeat
import _kaldifeat
from typing import List
import json
class Fbank(torch.nn.Module):
    """Thin torch.nn.Module wrapper around a kaldifeat fbank extractor."""
    def __init__(self, opts):
        super(Fbank, self).__init__()
        self.fbank = kaldifeat.Fbank(opts)
    def forward(self, waves: List[torch.Tensor]):
        # Delegates directly to kaldifeat; one feature tensor per waveform.
        return self.fbank(waves)
class TritonPythonModel:
    """Your Python model must use the same class name. Every Python model
    that is created must have "TritonPythonModel" as the class name.
    """
    def initialize(self, args):
        """`initialize` is called only once when the model is being loaded.
        Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
        Parameters
        ----------
        args : dict
          Both keys and values are strings. The dictionary keys and values are:
          * model_config: A JSON string containing the model configuration
          * model_instance_kind: A string containing model instance kind
          * model_instance_device_id: A string containing model instance device ID
          * model_repository: Model repository path
          * model_version: Model version
          * model_name: Model name
        """
        self.model_config = model_config = json.loads(args['model_config'])
        self.max_batch_size = max(model_config["max_batch_size"], 1)
        if "GPU" in model_config["instance_group"][0]["kind"]:
            self.device = "cuda"
        else:
            self.device = "cpu"
        # Get OUTPUT0 configuration
        output0_config = pb_utils.get_output_config_by_name(
            model_config, "speech")
        # Convert Triton types to numpy types
        output0_dtype = pb_utils.triton_string_to_numpy(
            output0_config['data_type'])
        # Only fp32/fp16 outputs are handled; anything non-fp32 is treated
        # as fp16 here.
        if output0_dtype == np.float32:
            self.output0_dtype = torch.float32
        else:
            self.output0_dtype = torch.float16
        # Get OUTPUT1 configuration
        output1_config = pb_utils.get_output_config_by_name(
            model_config, "speech_lengths")
        # Convert Triton types to numpy types
        self.output1_dtype = pb_utils.triton_string_to_numpy(
            output1_config['data_type'])
        # Translate the "parameters" section of the model config into
        # kaldifeat fbank options; unknown keys are ignored.
        params = self.model_config['parameters']
        opts = kaldifeat.FbankOptions()
        opts.frame_opts.dither = 0
        for li in params.items():
            key, value = li
            value = value["string_value"]
            if key == "num_mel_bins":
                opts.mel_opts.num_bins = int(value)
            elif key == "frame_shift_in_ms":
                opts.frame_opts.frame_shift_ms = float(value)
            elif key == "frame_length_in_ms":
                opts.frame_opts.frame_length_ms = float(value)
            elif key == "sample_rate":
                opts.frame_opts.samp_freq = int(value)
        opts.device = torch.device(self.device)
        self.opts = opts
        self.feature_extractor = Fbank(self.opts)
        self.feature_size = opts.mel_opts.num_bins
    def METHOD_NAME(self, requests):
        """`execute` must be implemented in every Python model. `execute`
        function receives a list of pb_utils.InferenceRequest as the only
        argument. This function is called when an inference is requested
        for this model.
        Parameters
        ----------
        requests : list
          A list of pb_utils.InferenceRequest
        Returns
        -------
        list
          A list of pb_utils.InferenceResponse. The length of this list must
          be the same as `requests`
        """
        # Flatten the waveforms of all requests into one list so feature
        # extraction runs as a single batch; batch_count/batch_len remember
        # how to split the results back per request.
        batch_count = []
        total_waves = []
        batch_len = []
        responses = []
        for request in requests:
            input0 = pb_utils.get_input_tensor_by_name(request, "wav")
            input1 = pb_utils.get_input_tensor_by_name(request, "wav_lens")
            cur_b_wav = input0.as_numpy()
            # Scale by 2**15 — presumably the input floats are normalized to
            # [-1, 1] and kaldifeat expects int16-range values; TODO confirm.
            cur_b_wav = cur_b_wav * (1 << 15)  # b x -1
            cur_b_wav_lens = input1.as_numpy()  # b x 1
            cur_batch = cur_b_wav.shape[0]
            cur_len = cur_b_wav.shape[1]
            batch_count.append(cur_batch)
            batch_len.append(cur_len)
            for wav, wav_len in zip(cur_b_wav, cur_b_wav_lens):
                wav_len = wav_len[0]
                # Trim the padded waveform to its true length.
                wav = torch.tensor(wav[0:wav_len], dtype=torch.float32,
                                   device=self.device)
                total_waves.append(wav)
        features = self.feature_extractor(total_waves)
        # Re-assemble one zero-padded (batch, frames, mel) tensor per
        # request, padded to the frame count implied by that request's
        # padded waveform length.
        idx = 0
        for b, l in zip(batch_count, batch_len):
            expect_feat_len = _kaldifeat.num_frames(l, self.opts.frame_opts)
            speech = torch.zeros((b, expect_feat_len, self.feature_size),
                                 dtype=self.output0_dtype, device=self.device)
            speech_lengths = torch.zeros((b, 1), dtype=torch.int32, device=self.device)
            for i in range(b):
                f = features[idx]
                f_l = f.shape[0]
                speech[i, 0: f_l, :] = f.to(self.output0_dtype)
                speech_lengths[i][0] = f_l
                idx += 1
            # put speech feature on device will cause empty output
            # we will follow this issue and now temporarily put it on cpu
            speech = speech.cpu()
            speech_lengths = speech_lengths.cpu()
            out0 = pb_utils.Tensor.from_dlpack("speech", to_dlpack(speech))
            out1 = pb_utils.Tensor.from_dlpack("speech_lengths",
                                               to_dlpack(speech_lengths))
            inference_response = pb_utils.InferenceResponse(output_tensors=[out0, out1])
            responses.append(inference_response)
        return responses
6,379 | sort | # cython: language_level=3
# distutils: language = c++
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2016-2023, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
Interface of the sorting function of the dpnp
Notes
-----
This module is a face or public interface file for the library
it contains:
- Interface functions
- documentation for the functions
- The functions parameters check
"""
import numpy
import dpnp
from dpnp.dpnp_algo import *
from dpnp.dpnp_utils import *
__all__ = ["argsort", "partition", "searchsorted", "sort"]
def argsort(in_array1, axis=-1, kind=None, order=None):
    """
    Returns the indices that would sort an array.
    For full documentation refer to :obj:`numpy.argsort`.
    Limitations
    -----------
    Input array is supported as :obj:`dpnp.ndarray`.
    Otherwise the function will be executed sequentially on CPU.
    Parameter ``axis`` is supported only with default value ``-1``.
    Parameter ``kind`` is supported only with default value ``None``.
    Parameter ``order`` is supported only with default value ``None``.
    Input array data types are limited by supported DPNP :ref:`Data types`.
    See Also
    --------
    :obj:`dpnp.sort` : Describes sorting algorithms used.
    :obj:`dpnp.lexsort` : Indirect stable sort with multiple keys.
    :obj:`dpnp.argpartition` : Indirect partial sort.
    :obj:`dpnp.take_along_axis` : Apply ``index_array`` from argsort to
                                  an array as if by calling sort.
    Examples
    --------
    >>> import dpnp as np
    >>> x = np.array([3, 1, 2])
    >>> out = np.argsort(x)
    >>> [i for i in out]
    [1, 2, 0]
    """
    x1_desc = dpnp.get_dpnp_descriptor(
        in_array1, copy_when_nondefault_queue=False
    )
    # The DPNP fast path is taken only when every parameter still has its
    # (only supported) default value.
    if x1_desc and axis == -1 and kind is None and order is None:
        return dpnp_argsort(x1_desc).get_pyobj()
    # Otherwise execute sequentially on CPU via NumPy.
    return call_origin(numpy.argsort, in_array1, axis, kind, order)
def partition(x1, kth, axis=-1, kind="introselect", order=None):
    """
    Return a partitioned copy of an array.
    For full documentation refer to :obj:`numpy.partition`.
    Limitations
    -----------
    Input array is supported as :obj:`dpnp.ndarray`.
    Input kth is supported as :obj:`int`.
    Parameters ``axis``, ``kind`` and ``order`` are supported only with default values.
    """
    x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
    # All of the following must hold for the DPNP fast path; the isinstance
    # and ndim guards come first so the shape/kth comparisons are safe.
    use_dpnp = (
        x1_desc
        and isinstance(kth, int)
        and x1_desc.ndim != 0
        and kth < x1_desc.shape[x1_desc.ndim - 1]
        and x1_desc.ndim + kth >= 0
        and axis == -1
        and kind == "introselect"
        and order is None
    )
    if use_dpnp:
        return dpnp_partition(x1_desc, kth, axis, kind, order).get_pyobj()
    # Fall back to NumPy for unsupported parameter combinations.
    return call_origin(numpy.partition, x1, kth, axis, kind, order)
def searchsorted(x1, x2, side="left", sorter=None):
    """
    Find indices where elements should be inserted to maintain order.
    For full documentation refer to :obj:`numpy.searchsorted`.
    Limitations
    -----------
    Input arrays is supported as :obj:`dpnp.ndarray`.
    Input array is supported only sorted.
    Input side is supported only values ``left``, ``right``.
    Parameters ``sorter`` is supported only with default values.
    """
    x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
    x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_nondefault_queue=False)
    # NOTE: the leading ``0 and`` deliberately disables the DPNP fast path,
    # so this function currently always falls through to NumPy below.
    if 0 and x1_desc and x2_desc:
        if x1_desc.ndim != 1:
            pass
        elif x1_desc.dtype != x2_desc.dtype:
            pass
        elif side not in ["left", "right"]:
            pass
        elif sorter is not None:
            pass
        elif x1_desc.size < 2:
            pass
        else:
            return dpnp_searchsorted(x1_desc, x2_desc, side=side).get_pyobj()
    return call_origin(numpy.searchsorted, x1, x2, side=side, sorter=sorter)
def METHOD_NAME(x1, **kwargs):
    """
    Return a sorted copy of an array.
    For full documentation refer to :obj:`numpy.sort`.
    Limitations
    -----------
    Input array is supported as :obj:`dpnp.ndarray`.
    Keyword arguments ``kwargs`` are currently unsupported.
    Dimension of input array is supported to be equal to ``1``.
    Otherwise the function will be executed sequentially on CPU.
    Input array data types are limited by supported DPNP :ref:`Data types`.
    See Also
    --------
    :obj:`dpnp.argsort` : Indirect sort.
    :obj:`dpnp.lexsort` : Indirect stable sort on multiple keys.
    :obj:`dpnp.searchsorted` : Find elements in a sorted array.
    :obj:`dpnp.partition` : Partial sort.
    Examples
    --------
    >>> import dpnp as np
    >>> a = np.array([1, 4, 3, 1])
    >>> out = np.sort(a)
    >>> [i for i in out]
    [1, 1, 3, 4]
    """
    x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
    # DPNP handles only the 1-D, no-kwargs case; everything else goes to NumPy.
    if x1_desc and not kwargs and x1_desc.ndim == 1:
        return dpnp_sort(x1_desc).get_pyobj()
    return call_origin(numpy.METHOD_NAME, x1, **kwargs)
6,380 | test equality | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (
InlineKeyboardButton,
InlineKeyboardMarkup,
InlineQueryResultGif,
InlineQueryResultVoice,
InputTextMessageContent,
MessageEntity,
)
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def inline_query_result_gif():
    # One shared instance per test module, built entirely from the constants
    # on TestInlineQueryResultGifBase so tests can compare against them.
    return InlineQueryResultGif(
        TestInlineQueryResultGifBase.id_,
        TestInlineQueryResultGifBase.gif_url,
        TestInlineQueryResultGifBase.thumbnail_url,
        gif_width=TestInlineQueryResultGifBase.gif_width,
        gif_height=TestInlineQueryResultGifBase.gif_height,
        gif_duration=TestInlineQueryResultGifBase.gif_duration,
        title=TestInlineQueryResultGifBase.title,
        caption=TestInlineQueryResultGifBase.caption,
        parse_mode=TestInlineQueryResultGifBase.parse_mode,
        caption_entities=TestInlineQueryResultGifBase.caption_entities,
        input_message_content=TestInlineQueryResultGifBase.input_message_content,
        reply_markup=TestInlineQueryResultGifBase.reply_markup,
        thumbnail_mime_type=TestInlineQueryResultGifBase.thumbnail_mime_type,
    )
class TestInlineQueryResultGifBase:
    """Constant attribute values shared by the fixture and the tests below."""
    id_ = "id"
    type_ = "gif"
    gif_url = "gif url"
    gif_width = 10
    gif_height = 15
    gif_duration = 1
    thumbnail_url = "thumb url"
    thumbnail_mime_type = "image/jpeg"
    title = "title"
    caption = "caption"
    parse_mode = "HTML"
    caption_entities = [MessageEntity(MessageEntity.ITALIC, 0, 7)]
    input_message_content = InputTextMessageContent("input_message_content")
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton("reply_markup")]])
class TestInlineQueryResultGifWithoutRequest(TestInlineQueryResultGifBase):
    """Offline tests for InlineQueryResultGif (no bot/API requests)."""
    def test_slot_behaviour(self, inline_query_result_gif):
        # Every declared slot must hold a value and no slot may be duplicated
        # anywhere in the MRO.
        inst = inline_query_result_gif
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"
    def test_caption_entities_always_tuple(self):
        # caption_entities defaults to an empty tuple, never None/list.
        result = InlineQueryResultGif(self.id_, self.gif_url, self.thumbnail_url)
        assert result.caption_entities == ()
    def test_expected_values(self, inline_query_result_gif):
        # Every constructor argument must round-trip to the same attribute.
        assert inline_query_result_gif.type == self.type_
        assert inline_query_result_gif.id == self.id_
        assert inline_query_result_gif.gif_url == self.gif_url
        assert inline_query_result_gif.gif_width == self.gif_width
        assert inline_query_result_gif.gif_height == self.gif_height
        assert inline_query_result_gif.gif_duration == self.gif_duration
        assert inline_query_result_gif.thumbnail_url == self.thumbnail_url
        assert inline_query_result_gif.thumbnail_mime_type == self.thumbnail_mime_type
        assert inline_query_result_gif.title == self.title
        assert inline_query_result_gif.caption == self.caption
        assert inline_query_result_gif.parse_mode == self.parse_mode
        assert inline_query_result_gif.caption_entities == tuple(self.caption_entities)
        assert (
            inline_query_result_gif.input_message_content.to_dict()
            == self.input_message_content.to_dict()
        )
        assert inline_query_result_gif.reply_markup.to_dict() == self.reply_markup.to_dict()
    def test_to_dict(self, inline_query_result_gif):
        # to_dict() must mirror the attributes, with nested objects dict-ified.
        inline_query_result_gif_dict = inline_query_result_gif.to_dict()
        assert isinstance(inline_query_result_gif_dict, dict)
        assert inline_query_result_gif_dict["type"] == inline_query_result_gif.type
        assert inline_query_result_gif_dict["id"] == inline_query_result_gif.id
        assert inline_query_result_gif_dict["gif_url"] == inline_query_result_gif.gif_url
        assert inline_query_result_gif_dict["gif_width"] == inline_query_result_gif.gif_width
        assert inline_query_result_gif_dict["gif_height"] == inline_query_result_gif.gif_height
        assert inline_query_result_gif_dict["gif_duration"] == inline_query_result_gif.gif_duration
        assert (
            inline_query_result_gif_dict["thumbnail_url"] == inline_query_result_gif.thumbnail_url
        )
        assert (
            inline_query_result_gif_dict["thumbnail_mime_type"]
            == inline_query_result_gif.thumbnail_mime_type
        )
        assert inline_query_result_gif_dict["title"] == inline_query_result_gif.title
        assert inline_query_result_gif_dict["caption"] == inline_query_result_gif.caption
        assert inline_query_result_gif_dict["parse_mode"] == inline_query_result_gif.parse_mode
        assert inline_query_result_gif_dict["caption_entities"] == [
            ce.to_dict() for ce in inline_query_result_gif.caption_entities
        ]
        assert (
            inline_query_result_gif_dict["input_message_content"]
            == inline_query_result_gif.input_message_content.to_dict()
        )
        assert (
            inline_query_result_gif_dict["reply_markup"]
            == inline_query_result_gif.reply_markup.to_dict()
        )
    def METHOD_NAME(self):
        # Equality/hash semantics under test: a == c although gif_url differs,
        # while a != d (different id) and a != e (different result type) —
        # i.e. the media fields do not participate in equality.
        a = InlineQueryResultGif(self.id_, self.gif_url, self.thumbnail_url)
        b = InlineQueryResultGif(self.id_, self.gif_url, self.thumbnail_url)
        c = InlineQueryResultGif(self.id_, "", self.thumbnail_url)
        d = InlineQueryResultGif("", self.gif_url, self.thumbnail_url)
        e = InlineQueryResultVoice(self.id_, "", "")
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a == c
        assert hash(a) == hash(c)
        assert a != d
        assert hash(a) != hash(d)
        assert a != e
        assert hash(a) != hash(e)
6,381 | test it works with puerto rico | import json
from project.util.geojson import FeatureGeometry
import pytest
from django.core.management import call_command
import urllib.parse
from project.justfix_environment import BASE_DIR
from project.mapbox import (
_encode_query_for_places_request,
mapbox_places_request,
find_city,
get_mapbox_state,
get_mapbox_zip_code,
get_mapbox_street_addr,
does_city_match,
find_address,
StreetAddress,
MapboxFeature,
MAPBOX_PLACES_URL,
)
# Canned Mapbox geocoding responses, shared with the frontend test suite.
JSON_DIR = BASE_DIR / "frontend" / "lib" / "forms" / "mapbox" / "tests"
BROOKLYN_FEATURE_JSON = json.loads((JSON_DIR / "brooklyn.json").read_text())
BROOKLYN_FEATURE = MapboxFeature(**BROOKLYN_FEATURE_JSON)
SAN_JUAN_FEATURE_JSON = json.loads((JSON_DIR / "san-juan.json").read_text())
SAN_JUAN_FEATURE = MapboxFeature(**SAN_JUAN_FEATURE_JSON)
# "brl" = 150 Court Street, Brooklyn (a street-address feature).
BRL_FEATURE_JSON = json.loads((JSON_DIR / "brl.json").read_text())
BRL_FEATURE = MapboxFeature(**BRL_FEATURE_JSON)
BRL_RESULTS_JSON = {
    "features": [BRL_FEATURE_JSON],
}
BROOKLYN_RESULTS_JSON = {
    "features": [BROOKLYN_FEATURE_JSON],
}
LA_FEATURE_JSON = json.loads((JSON_DIR / "la-city-hall.json").read_text())
LA_FEATURE = MapboxFeature(**LA_FEATURE_JSON)
LA_RESULTS_JSON = {
    "features": [LA_FEATURE_JSON],
}
def mkfeature(base=BROOKLYN_FEATURE_JSON, **kwargs):
    """Build a MapboxFeature from *base* JSON with **kwargs overrides."""
    return MapboxFeature(**{**base, **kwargs})
@pytest.fixture(autouse=True)
def setup_fixture(settings):
    # Every test in this module runs with a (fake) Mapbox token enabled;
    # individual tests blank it out to simulate Mapbox being disabled.
    settings.MAPBOX_ACCESS_TOKEN = "boop"
def mock_places_request(query: str, json_data, requests_mock):
    # Register a canned JSON response for the places-API URL of *query*.
    url = f"{MAPBOX_PLACES_URL}/{urllib.parse.quote(query)}.json"
    requests_mock.get(url, json=json_data)
# Convenience wrappers: one per canned fixture payload.
def mock_brooklyn_results(query: str, requests_mock):
    mock_places_request(query, BROOKLYN_RESULTS_JSON, requests_mock)
def mock_brl_results(query: str, requests_mock):
    mock_places_request(query, BRL_RESULTS_JSON, requests_mock)
def mock_la_results(query: str, requests_mock):
    mock_places_request(query, LA_RESULTS_JSON, requests_mock)
def mock_no_results(query: str, requests_mock):
    mock_places_request(query, {"features": []}, requests_mock)
class TestGetMapboxState:
    """Tests for get_mapbox_state (two-letter state from a feature's context)."""
    def test_it_returns_none_on_no_match(self):
        assert get_mapbox_state(mkfeature(context=[])) is None
    def test_it_returns_state_on_match(self):
        assert get_mapbox_state(BROOKLYN_FEATURE) == "NY"
    def METHOD_NAME(self):
        # Puerto Rico is not a state but must still resolve to "PR".
        assert get_mapbox_state(SAN_JUAN_FEATURE) == "PR"
class TestGetMapboxZipCode:
    """Tests for get_mapbox_zip_code."""
    def test_it_returns_none_on_no_match(self):
        assert get_mapbox_zip_code(mkfeature(context=[])) is None
    def test_it_returns_zipcode_on_match(self):
        assert get_mapbox_zip_code(BRL_FEATURE) == "11201"
class TestDoesCityMatch:
    """Tests for does_city_match (case-insensitive, per the assertions below)."""
    def test_it_returns_false_on_no_match(self):
        assert does_city_match("columbus", BRL_FEATURE) is False
    def test_it_returns_true_on_match(self):
        assert does_city_match("BROOKLYN", BRL_FEATURE) is True
        assert does_city_match("Brooklyn", BRL_FEATURE) is True
@pytest.mark.parametrize(
    "feature,expected",
    [
        (BRL_FEATURE, "150 Court Street"),
        # Without a house number only the street name is returned.
        (mkfeature(BRL_FEATURE_JSON, address=None), "Court Street"),
    ],
)
def test_get_mapbox_street_addr(feature, expected):
    assert get_mapbox_street_addr(feature) == expected
@pytest.mark.parametrize(
    "query,expected",
    [
        # Ensure slashes are escaped.
        ("1/2", "1%2F2"),
        # Ensure semicolons are replaced with commas.
        ("boop;jones", "boop%2Cjones"),
    ],
)
def test_encode_query_for_places_request(query, expected):
    assert _encode_query_for_places_request(query) == expected
class TestMapboxPlacesRequest:
    """Tests for mapbox_places_request error handling and success path."""
    def test_it_returns_none_when_mapbox_is_disabled(self, settings):
        settings.MAPBOX_ACCESS_TOKEN = ""
        assert mapbox_places_request("blah", {}) is None
    def test_it_returns_none_on_http_500(self, requests_mock):
        # Server errors are treated as "no answer", not as empty results.
        requests_mock.get(f"{MAPBOX_PLACES_URL}/a%20b.json", status_code=500)
        assert mapbox_places_request("a b", {}) is None
    def test_it_returns_empty_results_on_http_422(self, requests_mock):
        # Unprocessable queries yield an empty feature list instead of None.
        requests_mock.get(f"{MAPBOX_PLACES_URL}/a%20b.json", status_code=422)
        assert mapbox_places_request("a b", {}).features == []
    def test_it_returns_results_on_success(self, requests_mock):
        requests_mock.get(f"{MAPBOX_PLACES_URL}/br.json", json=BROOKLYN_RESULTS_JSON)
        results = mapbox_places_request("br", {})
        assert results and results.features[0].text == "Brooklyn"
class TestFindCity:
    """Tests for find_city (returns None on failure, [] on no state match)."""
    def test_it_returns_none_on_mapbox_failure(self, settings):
        settings.MAPBOX_ACCESS_TOKEN = ""
        assert find_city("zzz", "OH") is None
    def test_it_returns_empty_list_when_no_states_match(self, requests_mock):
        mock_brooklyn_results("brook, GA", requests_mock)
        assert find_city("brook", "GA") == []
    def test_it_returns_nonempty_list_when_states_match(self, requests_mock):
        mock_brooklyn_results("brook, NY", requests_mock)
        # Results are (city name, (longitude, latitude)) pairs.
        assert find_city("brook", "NY") == [("Brooklyn", (-73.9496, 40.6501))]
class TestFindAddress:
    """Tests for find_address (street address search constrained by city/state/zip)."""
    # Expected StreetAddress for the canned "brl" fixture.
    BRL = StreetAddress(
        "150 Court Street",
        "11201",
        "150 Court Street, Brooklyn, New York 11201, United States",
        FeatureGeometry(type="Point", coordinates=[-73.992972, 40.688772]),
    )
    def test_it_returns_none_on_mapbox_failure(self, settings):
        settings.MAPBOX_ACCESS_TOKEN = ""
        assert find_address("zzz", "blarg", "OH", "12345") is None
    def test_it_returns_empty_list_when_no_addresses_match(self, requests_mock):
        mock_brl_results("1 boop st, bespin, OH 12345", requests_mock)
        assert find_address("1 boop st", "bespin", "OH", "12345") == []
    def test_it_returns_nonempty_list_when_addresses_match(self, requests_mock):
        mock_brl_results("150 court st, brooklyn, NY 12345", requests_mock)
        assert find_address("150 court st", "brooklyn", "NY", "12345") == [self.BRL]
    def test_it_can_include_results_in_same_state_outside_of_city(self, requests_mock):
        # Same state but a different city is still accepted as a match.
        mock_brl_results("1 boop st, bespin, NY 12345", requests_mock)
        assert find_address("1 boop st", "bespin", "NY", "12345") == [self.BRL]
# Smoke tests: the management commands must run (and exit cleanly) even with
# Mapbox disabled.
def test_findmapboxcity_command_does_not_explode(settings):
    settings.MAPBOX_ACCESS_TOKEN = ""
    call_command("findmapboxcity", "brooklyn", "NY")
def test_findmapboxaddr_command_does_not_explode(settings):
    settings.MAPBOX_ACCESS_TOKEN = ""
    call_command("findmapboxaddr", "150 court st", "brooklyn", "NY", "11201")
6,382 | set test params | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running bitcoind with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
    """Functional test for -rpcbind / -rpcallowip.

    Linux-only: the bound-address checks use OS-specific network stats
    queries (see the SkipTest in run_test).
    """
    def METHOD_NAME(self):
        self.setup_clean_chain = True
        self.bind_to_localhost_only = False
        self.num_nodes = 1
        self.supports_cli = False
    def setup_network(self):
        # Nodes are added but not started; each sub-test starts/stops node 0
        # itself with the binding arguments under test.
        self.add_nodes(self.num_nodes, None)
    def add_options(self, parser):
        parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
        parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
        parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
    def run_bind_test(self, allow_ips, connect_to, addresses, expected):
        '''
        Start a node with requested rpcallowip and rpcbind parameters,
        then try to connect, and check if the set of bound addresses
        matches the expected set.
        '''
        self.log.info("Bind test for %s" % str(addresses))
        expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
        base_args = ['-disablewallet', '-nolisten']
        if allow_ips:
            base_args += ['-rpcallowip=' + x for x in allow_ips]
        binds = ['-rpcbind='+addr for addr in addresses]
        self.nodes[0].rpchost = connect_to
        self.start_node(0, base_args + binds)
        # Compare the node process's actual bound sockets to the expectation.
        pid = self.nodes[0].process.pid
        assert_equal(set(get_bind_addrs(pid)), set(expected))
        self.stop_nodes()
    def run_allowip_test(self, allow_ips, rpchost, rpcport):
        '''
        Start a node with rpcallow IP, and request getnetworkinfo
        at a non-localhost IP.
        '''
        self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
        node_args = \
            ['-disablewallet', '-nolisten'] + \
            ['-rpcallowip='+x for x in allow_ips] + \
            ['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]]  # Bind to localhost as well so start_nodes doesn't hang
        self.nodes[0].rpchost = None
        self.start_nodes([node_args])
        # connect to node through non-loopback interface
        node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, self.chain, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
        node.getnetworkinfo()
        self.stop_nodes()
    def run_test(self):
        # due to OS-specific network stats queries, this test works only on Linux
        if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
            raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
        self.log.info("Check for linux")
        if not sys.platform.startswith('linux'):
            raise SkipTest("This test can only be run on linux.")
        self.log.info("Check for ipv6")
        have_ipv6 = test_ipv6_local()
        if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
            raise SkipTest("This test requires ipv6 support.")
        self.log.info("Check for non-loopback interface")
        # Pick the first interface with a non-loopback IPv4 address, if any.
        self.non_loopback_ip = None
        for name,ip in all_interfaces():
            if ip != '127.0.0.1':
                self.non_loopback_ip = ip
                break
        if self.non_loopback_ip is None and self.options.run_nonloopback:
            raise SkipTest("This test requires a non-loopback ip address.")
        self.defaultport = rpc_port(0)
        if not self.options.run_nonloopback:
            self._run_loopback_tests()
        if not self.options.run_ipv4 and not self.options.run_ipv6:
            self._run_nonloopback_tests()
    def _run_loopback_tests(self):
        # Bind-address matrix on 127.0.0.1/[::1] with and without rpcallowip.
        if self.options.run_ipv4:
            # check only IPv4 localhost (explicit)
            self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
                               [('127.0.0.1', self.defaultport)])
            # check only IPv4 localhost (explicit) with alternative port
            self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
                               [('127.0.0.1', 32171)])
            # check only IPv4 localhost (explicit) with multiple alternative ports on same host
            self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
                               [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
        else:
            # check default without rpcallowip (IPv4 and IPv6 localhost)
            self.run_bind_test(None, '127.0.0.1', [],
                               [('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
            # check default with rpcallowip (IPv4 and IPv6 localhost)
            self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
                               [('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
            # check only IPv6 localhost (explicit)
            self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
                               [('::1', self.defaultport)])
            # check both IPv4 and IPv6 localhost (explicit)
            self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
                               [('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
    def _run_nonloopback_tests(self):
        self.log.info("Using interface %s for testing" % self.non_loopback_ip)
        # check only non-loopback interface
        self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
                           [(self.non_loopback_ip, self.defaultport)])
        # Check that with invalid rpcallowip, we are denied
        self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
        assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
# Script entry point: run the functional test directly.
if __name__ == '__main__':
    RPCBindTest().main()
6,383 | delete | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import hashlib
import time
from typing import Callable, Dict, List
from django.core.cache import caches
from django.db.models import Model
class NamespacedCache:
    """Cache wrapper whose keys live in a versioned namespace.

    Every key is prefixed with ``<prefixkey>:<version>:``.  Clearing the cache
    just bumps the version counter, which makes all previously written keys
    unreachable without deleting them individually (memcached-style namespacing).
    """

    def __init__(self, prefixkey: str, cache: str='default'):
        # prefixkey doubles as the cache key under which the current version is stored.
        self.cache = caches[cache]
        self.prefixkey = prefixkey
        # Version last seen by this instance; lets read operations skip one
        # cache round-trip.  Reset to None by clear().
        self._last_prefix = None

    def _prefix_key(self, original_key: str, known_prefix=None) -> str:
        """Return original_key qualified with the current namespace version."""
        # Race conditions can happen here, but should be very very rare.
        # We could only handle this by going _really_ lowlevel using
        # memcached's `add` keyword instead of `set`.
        # See also:
        # https://code.google.com/p/memcached/wiki/NewProgrammingTricks#Namespacing
        prefix = known_prefix or self.cache.get(self.prefixkey)
        if prefix is None:
            # First use: seed the version with the current UNIX timestamp.
            prefix = int(time.time())
            self.cache.set(self.prefixkey, prefix)
        self._last_prefix = prefix
        key = '%s:%d:%s' % (self.prefixkey, prefix, original_key)
        if len(key) > 200:  # Hash long keys, as memcached has a length limit
            # TODO: Use a more efficient, non-cryptographic hash algorithm
            key = hashlib.sha256(key.encode("UTF-8")).hexdigest()
        return key

    def _strip_prefix(self, key: str) -> str:
        # Inverse of _prefix_key for keys that were NOT hashed: skip over
        # "<prefixkey>:<version>:", accounting for colons inside prefixkey itself.
        return key.split(":", 2 + self.prefixkey.count(":"))[-1]

    def clear(self) -> None:
        """Invalidate the whole namespace by bumping the version counter."""
        self._last_prefix = None
        try:
            prefix = self.cache.incr(self.prefixkey, 1)
        except ValueError:
            # The version key expired or never existed; start a fresh one.
            prefix = int(time.time())
            self.cache.set(self.prefixkey, prefix)

    # NOTE(review): the `any` annotations below are the builtin function, not
    # typing.Any -- presumably typing.Any was intended; confirm before changing.

    def set(self, key: str, value: any, timeout: int=300):
        return self.cache.set(self._prefix_key(key), value, timeout)

    def get(self, key: str) -> any:
        # Use the cached version to avoid an extra lookup on the hot read path.
        return self.cache.get(self._prefix_key(key, known_prefix=self._last_prefix))

    def get_or_set(self, key: str, default: Callable, timeout=300) -> any:
        return self.cache.get_or_set(
            self._prefix_key(key, known_prefix=self._last_prefix),
            default=default,
            timeout=timeout
        )

    def get_many(self, keys: List[str]) -> Dict[str, any]:
        """Fetch several keys at once; the result is keyed by the unprefixed names."""
        values = self.cache.get_many([self._prefix_key(key) for key in keys])
        newvalues = {}
        for k, v in values.items():
            newvalues[self._strip_prefix(k)] = v
        return newvalues

    def set_many(self, values: Dict[str, any], timeout=300):
        newvalues = {}
        for k, v in values.items():
            newvalues[self._prefix_key(k)] = v
        return self.cache.set_many(newvalues, timeout)

    def METHOD_NAME(self, key: str):  # NOQA
        return self.cache.METHOD_NAME(self._prefix_key(key))

    def delete_many(self, keys: List[str]):  # NOQA
        return self.cache.delete_many([self._prefix_key(key) for key in keys])

    def incr(self, key: str, by: int=1):  # NOQA
        return self.cache.incr(self._prefix_key(key), by)

    def decr(self, key: str, by: int=1):  # NOQA
        return self.cache.decr(self._prefix_key(key), by)

    def close(self):  # NOQA
        # Part of the Django cache interface; nothing to release here.
        pass
class ObjectRelatedCache(NamespacedCache):
    """
    This object behaves exactly like the cache implementations by Django
    but with one important difference: It stores all keys related to a
    certain object, so you pass an object when creating this object and if
    you store data in this cache, it is only stored for this object. The
    main purpose of this is to be able to flush all cached data related
    to this object at once.

    The ObjectRelatedCache instance itself is stateless, all state is
    stored in the cache backend, so you can instantiate this class as many
    times as you want.
    """

    def __init__(self, obj: Model, cache: str='default'):
        assert isinstance(obj, Model)
        # Namespace keys by model name and primary key, e.g. "Order:42".
        super().__init__('%s:%s' % (obj._meta.object_name, obj.pk), cache)
6,384 | sample | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Dict, List, Optional, Tuple
import numpy as np
from gluonts.core.component import validated
from gluonts.mx import Tensor
from .distribution import Distribution, _sample_multiple, getF, softplus
from .distribution_output import DistributionOutput
class Beta(Distribution):
    r"""
    Beta distribution.

    Parameters
    ----------
    alpha
        Tensor containing the alpha shape parameters, of shape
        `(*batch_shape, *event_shape)`.
    beta
        Tensor containing the beta shape parameters, of shape
        `(*batch_shape, *event_shape)`.
    F
    """

    # Sampling goes through two Gamma draws (see METHOD_NAME), which is not
    # reparameterizable, so gradients cannot flow through samples.
    is_reparameterizable = False

    @validated()
    def __init__(self, alpha: Tensor, beta: Tensor) -> None:
        self.alpha = alpha
        self.beta = beta

    @property
    def F(self):
        # Resolve the array/symbol module (mx.nd or mx.sym) from the parameters.
        return getF(self.alpha)

    @property
    def batch_shape(self) -> Tuple:
        return self.alpha.shape

    @property
    def event_shape(self) -> Tuple:
        # Univariate distribution: scalar events.
        return ()

    @property
    def event_dim(self) -> int:
        return 0

    def log_prob(self, x: Tensor) -> Tensor:
        # log density: (a-1)*log(x) + (b-1)*log(1-x) - log B(a, b),
        # with log B(a, b) expanded via gammaln(a) + gammaln(b) - gammaln(a+b).
        F = self.F
        alpha, beta = self.alpha, self.beta
        return (
            (alpha - 1) * F.log(x)
            + (beta - 1) * F.log(1 - x)
            - F.gammaln(alpha)
            - F.gammaln(beta)
            + F.gammaln(alpha + beta)
        )

    @property
    def mean(self) -> Tensor:
        # E[X] = a / (a + b)
        return self.alpha / (self.alpha + self.beta)

    @property
    def variance(self) -> Tensor:
        # Var[X] = a*b / ((a+b)^2 * (a+b+1))
        F = self.F
        alpha, beta = self.alpha, self.beta
        return (alpha * beta) / (F.square(alpha + beta) * (alpha + beta + 1))

    @property
    def stddev(self) -> Tensor:
        return self.F.sqrt(self.variance)

    def METHOD_NAME(
        self, num_samples: Optional[int] = None, dtype=np.float32
    ) -> Tensor:
        """Draw samples via the Gamma-ratio construction X/(X+Y)."""
        epsilon = np.finfo(dtype).eps  # machine epsilon

        def s(alpha: Tensor, beta: Tensor) -> Tensor:
            # If X ~ Gamma(alpha, 1) and Y ~ Gamma(beta, 1), then
            # X / (X + Y) ~ Beta(alpha, beta).
            F = getF(alpha)
            samples_X = F.sample_gamma(
                alpha=alpha, beta=F.ones_like(alpha), dtype=dtype
            )
            samples_Y = F.sample_gamma(
                alpha=beta, beta=F.ones_like(beta), dtype=dtype
            )
            return samples_X / (samples_X + samples_Y)

        samples = _sample_multiple(
            s, alpha=self.alpha, beta=self.beta, num_samples=num_samples
        )
        # Clip to the open interval (0, 1) so log_prob stays finite.
        return self.F.clip(data=samples, a_min=epsilon, a_max=1 - epsilon)

    @property
    def args(self) -> List:
        return [self.alpha, self.beta]
class BetaOutput(DistributionOutput):
    # One projection unit per shape parameter.
    args_dim: Dict[str, int] = {"alpha": 1, "beta": 1}
    distr_cls: type = Beta

    @classmethod
    def domain_map(cls, F, alpha, beta):
        r"""
        Maps raw tensors to valid arguments for constructing a Beta
        distribution.

        Parameters
        ----------
        F:
        alpha:
            Tensor of shape `(*batch_shape, 1)`
        beta:
            Tensor of shape `(*batch_shape, 1)`

        Returns
        -------
        Tuple[Tensor, Tensor]:
            Two squeezed tensors, of shape `(*batch_shape)`: both have entries
            mapped to the positive orthant.
        """
        # softplus makes the parameters positive; the floor at cls.eps()
        # keeps them away from zero for numerical stability.
        alpha = F.maximum(softplus(F, alpha), cls.eps())
        beta = F.maximum(softplus(F, beta), cls.eps())
        return alpha.squeeze(axis=-1), beta.squeeze(axis=-1)

    @property
    def event_shape(self) -> Tuple:
        # Univariate: scalar events.
        return ()

    @property
    def value_in_support(self) -> float:
        # A point strictly inside the Beta support (0, 1).
        return 0.5
6,385 | command id | from __future__ import annotations
import abc
import copy
import gettext
import typing
_ = gettext.gettext
class UndoableCommand(abc.ABC):
    """Base class for commands managed by an UndoStack.

    Subclasses must implement _get_modified_state/_set_modified_state to
    snapshot whatever state the command touches, plus _undo (and optionally
    _redo, _perform and _merge).  The snapshots taken before (initialize)
    and after (commit) the command ran are used to decide whether undo/redo
    are still applicable.
    """

    def __init__(self, title: str, *, METHOD_NAME: typing.Optional[str] = None, is_mergeable: bool = False) -> None:
        self.__old_modified_state = None
        self.__new_modified_state = None
        self.__title = title
        self.__command_id = METHOD_NAME
        self.__is_mergeable = is_mergeable

    def close(self) -> None:
        # Drop the snapshots so they do not keep referenced objects alive.
        self.__old_modified_state = None
        self.__new_modified_state = None

    @property
    def title(self) -> str:
        """Human-readable title, shown in Undo/Redo menu entries."""
        return self.__title

    @property
    def METHOD_NAME(self) -> typing.Optional[str]:
        """Identifier used to decide whether two commands may be merged."""
        return self.__command_id

    @property
    def is_mergeable(self) -> bool:
        return self.__is_mergeable

    @property
    def is_redo_valid(self) -> bool:
        # Redo only applies if the state still matches the pre-command snapshot.
        return self._compare_modified_states(self.__old_modified_state, self._get_modified_state())

    @property
    def is_undo_valid(self) -> bool:
        # Undo only applies if the state still matches the post-command snapshot.
        return self._compare_modified_states(self.__new_modified_state, self._get_modified_state())

    def _compare_modified_states(self, state1: typing.Any, state2: typing.Any) -> bool:
        # override to allow the undo command to track state; but only use part of the state for comparison
        return bool(state1 == state2)

    def initialize(self, modified_state: typing.Any = None) -> None:
        """Record the pre-command snapshot (or the one supplied by the caller)."""
        self.__old_modified_state = modified_state if modified_state else self._get_modified_state()

    @property
    def _old_modified_state(self) -> typing.Any:
        return self.__old_modified_state

    def commit(self) -> None:
        """Record the post-command snapshot; called when pushed onto the stack."""
        self.__new_modified_state = self._get_modified_state()

    def perform(self) -> None:
        self._perform()

    def undo(self) -> None:
        self._undo()
        self._set_modified_state(self.__old_modified_state)
        # A command that has been undone must not be merged into any more.
        self.__is_mergeable = False

    def redo(self) -> None:
        self._redo()
        self._set_modified_state(self.__new_modified_state)

    def can_merge(self, command: UndoableCommand) -> bool:
        # Subclasses may allow folding consecutive similar commands into one.
        return False

    def merge(self, command: UndoableCommand) -> None:
        assert self.METHOD_NAME and self.METHOD_NAME == command.METHOD_NAME
        self._merge(command)
        # After merging, undo validity is judged against the merged result.
        self.__new_modified_state = self._get_modified_state()

    def _merge(self, command: UndoableCommand) -> None:
        pass

    @abc.abstractmethod
    def _get_modified_state(self) -> typing.Any:
        pass

    @abc.abstractmethod
    def _set_modified_state(self, modified_state: typing.Any) -> None:
        pass

    def _perform(self) -> None:
        pass

    @abc.abstractmethod
    def _undo(self) -> None:
        pass

    def _redo(self) -> None:
        # Default redo: commands are assumed symmetric, so undoing the undo redoes.
        self._undo()
class UndoStack:
    """Holds the undo and redo stacks of UndoableCommands."""

    def __init__(self) -> None:
        # undo/redo stack. next item is at the end.
        self.__undo_stack: typing.List[UndoableCommand] = list()
        self.__redo_stack: typing.List[UndoableCommand] = list()

    def close(self) -> None:
        self.clear()

    @property
    def can_redo(self) -> bool:
        return len(self.__redo_stack) > 0 and self.__redo_stack[-1].is_redo_valid

    @property
    def can_undo(self) -> bool:
        return len(self.__undo_stack) > 0 and self.__undo_stack[-1].is_undo_valid

    @property
    def last_command(self) -> typing.Optional[UndoableCommand]:
        return self.__undo_stack[-1] if self.__undo_stack else None

    def pop_command(self) -> None:
        """Discard the most recent undo command without executing it."""
        self.__undo_stack.pop().close() if self.__undo_stack else None

    def validate(self) -> None:
        """Clear everything when the top undo command no longer matches reality."""
        if len(self.__undo_stack) > 0 and not self.__undo_stack[-1].is_undo_valid:
            self.clear()

    @property
    def undo_title(self) -> str:
        if self.can_undo:
            return _("Undo") + " " + self.__undo_stack[-1].title
        return _("Undo")

    @property
    def redo_title(self) -> str:
        if self.can_redo:
            return _("Redo") + " " + self.__redo_stack[-1].title
        return _("Redo")

    @property
    def _undo_count(self) -> int:
        return len(self.__undo_stack)  # for testing

    @property
    def _redo_count(self) -> int:
        return len(self.__redo_stack)  # for testing

    def clear(self) -> None:
        # Close every command so snapshots are released.
        while len(self.__redo_stack) > 0:
            self.__redo_stack.pop().close()
        while (len(self.__undo_stack)) > 0:
            self.__undo_stack.pop().close()

    def undo(self) -> None:
        """Undo the most recent command and move it to the redo stack."""
        assert len(self.__undo_stack) > 0
        undo_command = self.__undo_stack.pop()
        undo_command.undo()
        self.__redo_stack.append(undo_command)

    def redo(self) -> None:
        """Redo the most recently undone command and move it back to the undo stack."""
        assert len(self.__redo_stack) > 0
        undo_command = self.__redo_stack.pop()
        undo_command.redo()
        self.__undo_stack.append(undo_command)

    def push(self, undo_command: UndoableCommand) -> None:
        """Record a performed command; merge with the previous one when possible."""
        assert undo_command
        undo_command.commit()
        last_undo_command = self.__undo_stack[-1] if self.__undo_stack else None
        # Fold consecutive mergeable commands with the same command id into one entry.
        if last_undo_command and last_undo_command.is_mergeable and undo_command.is_mergeable and last_undo_command.METHOD_NAME == undo_command.METHOD_NAME:
            last_undo_command.merge(undo_command)
            undo_command.close()
        else:
            self.__undo_stack.append(undo_command)
        # Pushing a new command invalidates anything that could be redone.
        while len(self.__redo_stack) > 0:
            self.__redo_stack.pop().close()
6,386 | verify elemwise sum | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for tensor operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def METHOD_NAME(num_args, dtype):
    """Build topi.elemwise_sum over `num_args` placeholders and check it against numpy."""
    shape = (3, 5, 4)
    tvm_placeholders = []
    for i in range(num_args):
        tvm_placeholders.append(te.placeholder(shape, name="data" + str(i), dtype=dtype))
    esum = topi.elemwise_sum(tvm_placeholders)
    s = te.create_schedule([esum.op])

    # Reference inputs are memoized on disk so repeated test runs reuse them.
    @memoize("topi.tests.test_topi_elemwise_sum")
    def get_ref_data():
        np_nd = [np.random.uniform(0, 10, size=shape).astype(dtype) for i in range(num_args)]
        return np_nd
    np_nd = get_ref_data()

    def check_target(target):
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        dev = tvm.device(target, 0)
        out = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
        f = tvm.build(s, tvm_placeholders + [esum], target, name="elemwise_sum")
        tvm_nd = [tvm.nd.array(nd, dev) for nd in np_nd] + [out]
        f(*tvm_nd)
        # Reference result: element-wise sum across all inputs.
        np_out = np.sum(np.array(np_nd), axis=0)
        tvm.testing.assert_allclose(out.numpy(), np_out, rtol=1e-5)

    for target in ["llvm"]:
        check_target(target)
def verify_full(shape, dtype, fill_value):
    """Check topi.full and topi.full_like against numpy.full."""
    A = te.placeholder(shape, dtype=dtype, name="A")
    B = topi.full_like(A, fill_value=fill_value)
    C = topi.full(shape=shape, dtype=dtype, fill_value=fill_value)
    s1 = te.create_schedule([B.op])
    s2 = te.create_schedule([C.op])

    # Memoized reference output (constant array of fill_value).
    @memoize("topi.tests.test_topi_full")
    def get_ref_data():
        return np.full(shape, fill_value, dtype)
    np_nd = get_ref_data()

    def check_target(target):
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        dev = tvm.device(target, 0)
        out = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
        # full_like takes an input tensor (only its shape/dtype matter).
        f = tvm.build(s1, [A, B], target, name="full_like")
        f(tvm.nd.array(np.zeros(shape, dtype), dev), out)
        tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5)
        # full takes no input tensor at all.
        f = tvm.build(s2, [C], target, name="full")
        f(out)
        tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5)

    for target in ["llvm"]:
        check_target(target)
def verify_vectorization(n, m, dtype):
    """Check a vectorized element-wise add-one kernel on an (n, m) tensor."""
    def check_targeta(targeta):
        if not tvm.testing.device_enabled(targeta):
            print("Skip because %s is not enabled" % targeta)
            return
        # fp16 needs hardware support; skip on GPUs that lack it.
        if dtype == "float16" and targeta == "cuda" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        with tvm.target.Target(targeta):
            dev = tvm.device(targeta, 0)
            A = te.placeholder((n, m), name="A", dtype=dtype)
            B = te.compute((n, m), lambda i, j: A[i, j] + tvm.tir.const(1, A.dtype), name="B")
            # Use the target's stock element-wise schedule (vectorized).
            S = tvm.topi.testing.get_elemwise_schedule(targeta)(B)
            fun = tvm.build(S, [A, B], targeta)
            np_A = tvm.nd.empty((n, m), A.dtype, dev).copyfrom(np.random.uniform(size=(n, m)))
            np_B = tvm.nd.empty((n, m), B.dtype, dev)
            fun(np_A, np_B)
            tvm.testing.assert_allclose(np_B.numpy(), np_A.numpy() + 1, rtol=1e-5)

    for targeta in ["cuda"]:
        check_targeta(targeta)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorization():
    # fp16 support is additionally checked inside verify_vectorization.
    verify_vectorization(128, 64, "float16")
def test_elemwise_sum():
    # Cover a single input, multiple inputs, and an integer dtype.
    METHOD_NAME(1, "float32")
    METHOD_NAME(5, "float32")
    METHOD_NAME(4, "int32")
def test_full():
    # Float fill value and integer fill value.
    verify_full((3, 4, 5), "float32", 3.14)
    verify_full((10,), "int32", 7)
if __name__ == "__main__":
    # Run the CPU tests directly; test_vectorization requires CUDA.
    test_elemwise_sum()
    test_full()
    test_vectorization()
6,387 | import outline | #!/usr/bin/python
# coding=utf8
import os
import argparse
import mysql.connector
import sys
class Config:
    """Bag of command line options (host, port, username, password, tenant, dump)."""
    pass

# Global configuration filled in by get_args().
g_config = Config()


class Stat:
    """Counters for the dump phase."""
    def reset(self):
        self.db_count = 0
        self.outline_count = 0
    def __init__(self):
        self.reset()

# Global dump statistics.
g_stat = Stat()


class ImportStat:
    """Counters for the import phase."""
    def reset(self):
        self.succ_db_count = 0
        self.fail_db_count = 0
        self.succ_outline_count = 0
        self.fail_outline_count = 0
    def __init__(self):
        self.reset()

# Global import statistics.
g_import_stat = ImportStat()
# Arguments:
#   -h -P -u -p -t -i -d
#   If -t is not given, all tenants are processed.
#
#   -d means dump
#   -i means import
# Query the __all_server table to learn how many servers exist and set the concurrency accordingly.
# Set the query timeout.
# Query the GV$OB_PLAN_CACHE_PLAN_STAT table to obtain the outline data:
#
#   select sql_id, sql_text, outline_data from GV$OB_PLAN_CACHE_PLAN_STAT
#
# Check whether any sql_id or outline_data conflicts, and look up the corresponding database.
#
# Dump the result as "create outline" statements together with the matching "use <database>" statements.
def get_connect():
    """Open a new MySQL connection using the globally parsed command line options."""
    return mysql.connector.connect(host = g_config.host, port = g_config.port, user = g_config.username, password = g_config.password)
def get_real_db_id(tenant_id, db_id):
    """Return the globally unique database id.

    Database id 1 (the builtin database) is tenant-local, so it is combined
    with the tenant id shifted into the high bits; any other id is already
    globally unique and is returned unchanged.
    """
    return (tenant_id << 40 | db_id) if db_id == 1 else db_id
def get_args(args):
    """Parse the command line into g_config.  Return 0 on success, -1 on error."""
    # add_help=False: -h is taken by --host, so argparse's default help flag must be off.
    parser = argparse.ArgumentParser(add_help = False)
    parser.add_argument('-h', '--host', dest='host', type=str)
    parser.add_argument('-P', '--port', dest='port', type=int)
    parser.add_argument('-u', '--username', dest='username', type=str)
    parser.add_argument('-p', '--password', dest='password', type=str)
    parser.add_argument('-t', '--tenant', dest='tenant', type=int)
    parser.add_argument('-d', '--dump', dest='dump', action='store_true')
    # "import" is a Python keyword, hence the dest name "import1".
    parser.add_argument('-i', '--import', dest='import1', action='store_true')
    ret = parser.parse_args(args)
    if ret.host == None:
        print >> sys.stderr, 'please give hostname: -h'
        return -1
    else:
        g_config.host = ret.host
    if ret.port == None:
        print >> sys.stderr, 'please give port: -P'
        return -1
    else:
        g_config.port = ret.port
    if ret.username == None:
        print >> sys.stderr, 'please give username: -u'
        return -1
    else:
        g_config.username = ret.username
    if ret.tenant == None:
        print >> sys.stderr, 'please give tenant_id: -t'
        return -1
    else:
        g_config.tenant = ret.tenant
    # The password may legitimately be absent (None).
    g_config.password = ret.password
    # Exactly one of -d (dump) and -i (import) must be given.
    if ret.dump == False and ret.import1 == False:
        print >> sys.stderr, 'please give dump or import: -d/-i'
        return -1
    elif ret.dump == True and ret.import1 == True:
        print >> sys.stderr, 'only dump or import: -d/-i'
        return -1
    else:
        g_config.dump = ret.dump
    return 0
def output(name, sql_id, outline_data):
    """Emit one "create outline" statement to stdout and count it."""
    g_stat.outline_count += 1
    print "create outline auto_gen_%s on '%s' using hint %s;" % (name, sql_id, outline_data)
def check(lines):
    """Return True when every entry shares the first entry's sql_text and outline_data.

    Each entry is a (sql_id, sql_text, outline_data) tuple; index 0 is not compared.
    """
    reference = lines[0]
    return all(entry[1] == reference[1] and entry[2] == reference[2] for entry in lines[1:])
def get_db_name(db_id):
    """Look up the database name for db_id within the configured tenant."""
    conn = get_connect()
    cur = conn.cursor()
    sql = "select database_name from oceanbase.__all_database where database_id = %d and tenant_id = %d" % (db_id, g_config.tenant)
    cur.execute(sql)
    rs = cur.fetchone()
    return rs[0]
def dump_db_outline(db_id, items):
    """Print "create outline" statements for one database's plan-cache rows.

    `items` is a list of (sql_id, sql_text, outline_data) tuples.
    """
    # 2**64 - 1 marks plans created without a "use <database>"; they cannot be dumped.
    if db_id == 18446744073709551615:
        for item in items:
            print >> sys.stderr, "sql_id = %s | sql_text = %s : no use database" % (item[0], item[1])
        return
    db_name = get_db_name(db_id)
    g_stat.db_count += 1
    print
    print "use %s;" % db_name
    # Group rows by sql_id so duplicates can be checked for conflicts.
    # NOTE(review): `map` shadows the builtin of the same name.
    map = {}
    for line in items:
        sql_id = line[0]
        sql_text = line[1]
        outline_data = line[2]
        if sql_id in map:
            map[sql_id].append((sql_id, sql_text, outline_data))
        else:
            map[sql_id] = [(sql_id, sql_text, outline_data)]
    count = 0
    for k, v in map.items():
        # Outline names are made unique per tenant/database with a running counter.
        name = '%d_%s%d' % (g_config.tenant, db_name, count)
        if len(v) == 1:
            output(name, v[0][0], v[0][2])
        else:
            if check(v):
                output(name, v[0][0], v[0][2])
            else:
                # Same sql_id with differing text/outline data: refuse to dump it.
                print >> sys.stderr, "sql_id = %s has conflict" % (v[0][0])
        count += 1
def dump_outline():
    """Dump outlines for every plan in the tenant's plan cache, grouped by database."""
    conn = get_connect()
    cur = conn.cursor()
    cur.execute('select count(1) from oceanbase.__all_server')
    rs = cur.fetchone()
    # NOTE(review): server_count is computed but never used (the header comment
    # suggests it was meant to set the concurrency).
    server_count = rs[0]
    # Order by db_id so each database's rows arrive contiguously and can be
    # flushed in one dump_db_outline call.
    cur.execute('select db_id, sql_id, statement, outline_data from oceanbase.GV$OB_PLAN_CACHE_PLAN_STAT where tenant_id = %d order by db_id' % g_config.tenant)
    rs = cur.fetchall()
    last_db_id = 0
    items = []
    for i in range(0, len(rs)):
        # NOTE(review): the tenant_id argument is hard-coded to 1 here;
        # presumably g_config.tenant was intended -- confirm.
        db_id = get_real_db_id(1, rs[i][0])
        if db_id == last_db_id:
            items.append(rs[i][1:])
        else:
            # New database: flush the rows collected for the previous one.
            if len(items) != 0:
                dump_db_outline(last_db_id, items)
                items = []
            last_db_id = db_id
            items.append(rs[i][1:])
    # Flush the trailing group.
    if len(items) != 0:
        dump_db_outline(last_db_id, items)
        items = []
    print >> sys.stderr, "%d database and %d outline dumped" % (g_stat.db_count, g_stat.outline_count)
# Import outlines.  Check whether each outline already exists; if it does, skip it.
#
# Query the __all_outline table to verify that everything was imported correctly.
#
# Report import statistics:
#   1. how many outlines were imported and how many already existed
#   2. any conflicts encountered along the way
def METHOD_NAME():
    """Read "use ..." and "create outline ..." statements from stdin and execute them."""
    conn = get_connect()
    cur = conn.cursor()
    # Outlines must be imported while connected as the target tenant.
    cur.execute("select effective_tenant_id()")
    rs = cur.fetchone()
    if rs[0] != g_config.tenant:
        print >> sys.stderr, 'tenant id not equal %d <> %d' % (rs[0], g_config.tenant)
        sys.exit(-1)
    # state == 1 means the last "use <database>" failed, so subsequent outline
    # statements for that database are skipped rather than executed.
    state = 0
    for line in sys.stdin:
        line = line.strip()
        # Skip blank lines and comments.
        if len(line) != 0 and line[0] != '#':
            if len(line) >= 3 and line[:3] == 'use':
                print >> sys.stderr, 'change database: %s' % line
                try:
                    cur.execute(line)
                    state = 0
                    g_import_stat.succ_db_count += 1
                except:
                    g_import_stat.fail_db_count += 1
                    print >> sys.stderr, 'fail to execute: %s' % line
                    state = 1
            else:
                if state == 0:
                    try:
                        cur.execute(line)
                        g_import_stat.succ_outline_count += 1
                    except:
                        print >> sys.stderr, 'fail to execute: %s' % line
                        g_import_stat.fail_outline_count += 1
                else:
                    g_import_stat.fail_outline_count += 1
                    print >> sys.stderr, 'skip to execute: %s' % line
    print >> sys.stderr, "db succ %d | db fail %d | outline succ %d | outline fail %d" % (g_import_stat.succ_db_count, g_import_stat.fail_db_count, g_import_stat.succ_outline_count, g_import_stat.fail_outline_count)
if __name__ == '__main__':
    if -1 == get_args(sys.argv[1:]):
        sys.exit(-1)
    if g_config.dump:
        # Dumping reads the plan cache through the sys tenant (tenant id 1).
        conn = get_connect()
        cur = conn.cursor()
        cur.execute("select effective_tenant_id()")
        rs = cur.fetchone()
        if rs[0] != 1:
            print >> sys.stderr, 'please use sys tenant to dump'
            sys.exit(-1)
        # The target tenant must exist.
        cur.execute("select * from oceanbase.__all_tenant where tenant_id = %d" % g_config.tenant)
        rs = cur.fetchall()
        if 1 != len(rs):
            print >> sys.stderr, 'no such tenant_id %d ' % g_config.tenant
            sys.exit(-1)
        dump_outline()
    else:
        METHOD_NAME()
|
6,388 | iterable to batches | """Utility functions."""
import contextlib
import hashlib
import re
import time
import urllib
from collections.abc import Collection, Generator, Iterable
from decimal import ROUND_HALF_UP, Decimal
from itertools import islice
from typing import cast
from xml.etree.ElementTree import Element # nosec # Element is not available from defusedxml, but only used as type
from defusedxml import ElementTree
from .exceptions import XMLRootElementError
from .type import URL, Namespaces, Response
async def parse_source_response_xml(response: Response, allowed_root_tags: Collection[str] | None = None) -> Element:
    """Parse the XML from the source response.

    Raise XMLRootElementError when allowed_root_tags is given and the root tag
    is not one of them.
    """
    # defusedxml guards against XML attacks; DTDs are explicitly permitted here.
    tree = cast(Element, ElementTree.fromstring(await response.text(), forbid_dtd=False))
    if allowed_root_tags and tree.tag not in allowed_root_tags:
        raise XMLRootElementError(allowed_root_tags, tree.tag)
    return tree
async def parse_source_response_xml_with_namespace(
    response: Response,
    allowed_root_tags: Collection[str] | None = None,
) -> tuple[Element, Namespaces]:
    """Parse the XML with namespace from the source response."""
    tree = await parse_source_response_xml(response, allowed_root_tags)
    # ElementTree has no API to get the namespace so we extract it from the root tag:
    # a namespaced tag looks like "{uri}tag", so take the part between "{" and "}".
    namespaces = {"ns": tree.tag.split("}")[0][1:]}
    return tree, namespaces
# A substitution is a compiled pattern plus its replacement text.
Substitution = tuple[re.Pattern[str], str]
MEMORY_ADDRESS_SUB: Substitution = (re.compile(r" at 0x[0-9abcdef]+>"), ">")
TOKEN_SUB: Substitution = (re.compile(r"token=[^&]+"), "token=<redacted>")
KEY_SUB: Substitution = (re.compile(r"key=[0-9abcdef]+"), "key=<redacted>")
HASH_SUB: Substitution = (re.compile(r"(?i)[a-f0-9]{20,}"), "hashremoved")


def stable_traceback(traceback: str) -> str:
    """Scrub volatile content (memory addresses, tokens, keys) from a traceback.

    This makes tracebacks comparable across runs.
    """
    scrubbed = traceback
    for pattern, replacement in (MEMORY_ADDRESS_SUB, TOKEN_SUB, KEY_SUB):
        scrubbed = pattern.sub(replacement, scrubbed)
    return scrubbed
def tokenless(url: str) -> str:
    """Strip private tokens from (text with) urls."""
    pattern, replacement = TOKEN_SUB
    return pattern.sub(replacement, url)
def hashless(url: URL) -> URL:
    """Strip hash-like hex strings from the url so it can serve as a stable issue key."""
    pattern, replacement = HASH_SUB
    scheme, netloc, path, query, fragment = urllib.parse.urlsplit(str(url))
    # Only the path, query and fragment can contain hashes worth scrubbing.
    path, query, fragment = (pattern.sub(replacement, part) for part in (path, query, fragment))
    return URL(urllib.parse.urlunsplit((scheme, netloc, path, query, fragment)))
def md5_hash(string: str) -> str:
    """Return the hex digest of the MD5 hash of the string (not used for security)."""
    return hashlib.md5(string.encode("utf-8"), usedforsecurity=False).hexdigest()  # noqa: DUO130,RUF100
def sha1_hash(string: str) -> str:
    """Return the hex digest of the SHA-1 hash of the string (not used for security)."""
    return hashlib.sha1(string.encode("utf-8"), usedforsecurity=False).hexdigest()  # noqa: DUO130,RUF100
def is_regexp(string: str) -> bool:
    """Return whether the string looks like a regular expression."""
    # Any regex metacharacter in the string makes us treat it as a pattern.
    return any(character in "$^?.+*[]" for character in string)


def match_string_or_regular_expression(string: str, strings_and_or_regular_expressions: Collection[str]) -> bool:
    """Return whether the string equals one of the given strings or matches one of the given regular expressions."""
    for candidate in strings_and_or_regular_expressions:
        matched = bool(re.match(candidate, string)) if is_regexp(candidate) else candidate == string
        if matched:
            return True
    return False
def METHOD_NAME(iterable: Iterable, batch_size: int) -> Iterable:
    """Yield successive tuples of at most batch_size items taken from the iterable."""
    iterator = iter(iterable)

    def take_batch() -> tuple:
        return tuple(islice(iterator, batch_size))

    # iter() with a sentinel keeps producing batches until an empty tuple appears.
    return iter(take_batch, ())
def decimal_round_half_up(dec: Decimal | float) -> int:
"""Round decimal or float to nearest integer, with ties going away from zero."""
return int(Decimal(dec).to_integral_value(ROUND_HALF_UP))
class Clock:
    """Simple stopwatch based on ``time.perf_counter``."""

    def __init__(self) -> None:
        # Start immediately; duration stays 0.0 until stop() is called.
        self.start = time.perf_counter()
        self.duration = 0.0

    def stop(self) -> None:
        """Stop the clock, recording the elapsed seconds in ``duration``."""
        self.duration = time.perf_counter() - self.start


@contextlib.contextmanager
def timer() -> Generator[Clock, None, None]:
    """Context manager that yields a Clock and stops it when the body completes."""
    stopwatch = Clock()
    yield stopwatch
    stopwatch.stop()
6,389 | get veff | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Timothy Berkelbach <tim.berkelbach@gmail.com>
# Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic Restricted Kohn-Sham for periodic systems with k-point sampling
See Also:
pyscf.pbc.dft.rks.py : Non-relativistic Restricted Kohn-Sham for periodic
systems at a single k-point
'''
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc.scf import khf
from pyscf.pbc.dft import gen_grid
from pyscf.pbc.dft import rks
from pyscf.pbc.dft import multigrid
from pyscf import __config__
def METHOD_NAME(ks, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
                kpts=None, kpts_band=None):
    '''Coulomb + XC functional

    .. note::
        This is a replica of pyscf.dft.rks.get_veff with kpts added.
        This function will change the ks object.

    Args:
        ks : an instance of :class:`RKS`
            XC functional are controlled by ks.xc attribute.  Attribute
            ks.grids might be initialized.
        dm : ndarray or list of ndarrays
            A density matrix or a list of density matrices

    Returns:
        Veff : (nkpts, nao, nao) or (*, nkpts, nao, nao) ndarray
        Veff = J + Vxc.
    '''
    if cell is None: cell = ks.cell
    if dm is None: dm = ks.make_rdm1()
    if kpts is None: kpts = ks.kpts
    t0 = (logger.process_clock(), logger.perf_counter())

    ni = ks._numint
    # Non-local correlation functionals are not implemented for k-point RKS.
    if ks.nlc or ni.libxc.is_nlc(ks.xc):
        raise NotImplementedError(f'NLC functional {ks.xc} + {ks.nlc}')

    hybrid = ni.libxc.is_hybrid_xc(ks.xc)

    # Fast path: for pure functionals the multigrid backend evaluates J and
    # Vxc together; the returned vxc already contains the Coulomb term.
    if not hybrid and isinstance(ks.with_df, multigrid.MultiGridFFTDF):
        n, exc, vxc = multigrid.nr_rks(ks.with_df, ks.xc, dm, hermi,
                                       kpts, kpts_band,
                                       with_j=True, return_j=False)
        logger.debug(ks, 'nelec by numeric integration = %s', n)
        t0 = logger.timer(ks, 'vxc', *t0)
        return vxc

    # ndim = 3 : dm.shape = (nkpts, nao, nao)
    ground_state = (isinstance(dm, np.ndarray) and dm.ndim == 3 and
                    kpts_band is None)

    # For UniformGrids, grids.coords does not indicate whehter grids are initialized
    if ks.grids.non0tab is None:
        ks.grids.build(with_non0tab=True)
        if (isinstance(ks.grids, gen_grid.BeckeGrids) and
            ks.small_rho_cutoff > 1e-20 and ground_state):
            # Drop grid points with negligible density to speed up later iterations.
            ks.grids = rks.prune_small_rho_grids_(ks, cell, dm, ks.grids, kpts)
        t0 = logger.timer(ks, 'setting up grids', *t0)

    if hermi == 2:  # because rho = 0
        n, exc, vxc = 0, 0, 0
    else:
        max_memory = ks.max_memory - lib.current_memory()[0]
        n, exc, vxc = ks._numint.nr_rks(cell, ks.grids, ks.xc, dm, hermi,
                                        kpts, kpts_band, max_memory=max_memory)
        logger.debug(ks, 'nelec by numeric integration = %s', n)
        t0 = logger.timer(ks, 'vxc', *t0)

    # Equal weight per k-point.
    nkpts = len(kpts)
    weight = 1. / nkpts
    if not hybrid:
        vj = ks.get_j(cell, dm, hermi, kpts, kpts_band)
        vxc += vj
    else:
        # Hybrid/range-separated hybrid: hyb scales HF exchange and
        # (alpha - hyb) scales the long-range exchange computed with omega.
        omega, alpha, hyb = ks._numint.rsh_and_hybrid_coeff(ks.xc, spin=cell.spin)
        if getattr(ks.with_df, '_j_only', False) and nkpts > 1:  # for GDF and MDF
            # Exchange integrals are required; rebuild the DF tensors if needed.
            ks.with_df._j_only = False
            if ks.with_df._cderi is not None:
                logger.warn(ks, 'df.j_only cannot be used with hybrid '
                            'functional. Rebuild cderi')
                ks.with_df.build()
        vj, vk = ks.get_jk(cell, dm, hermi, kpts, kpts_band)
        vk *= hyb
        if omega != 0:
            vklr = ks.get_k(cell, dm, hermi, kpts, kpts_band, omega=omega)
            vklr *= (alpha - hyb)
            vk += vklr
        vxc += vj - vk * .5
        if ground_state:
            # Exchange energy contribution (restricted closed-shell factor .5 * .5).
            exc -= np.einsum('Kij,Kji', dm, vk).real * .5 * .5 * weight

    if ground_state:
        ecoul = np.einsum('Kij,Kji', dm, vj).real * .5 * weight
    else:
        ecoul = None

    # Attach the energy components so energy_elec() can reuse them.
    vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=None, vk=None)
    return vxc
@lib.with_doc(khf.get_rho.__doc__)
def get_rho(mf, dm=None, grids=None, kpts=None):
    # Electron density on the real-space grid; MultiGridFFTDF has its own fast path.
    if dm is None: dm = mf.make_rdm1()
    if grids is None: grids = mf.grids
    if kpts is None: kpts = mf.kpts
    if isinstance(mf.with_df, multigrid.MultiGridFFTDF):
        rho = mf.with_df.get_rho(dm, kpts)
    else:
        rho = mf._numint.get_rho(mf.cell, dm, grids, kpts, mf.max_memory)
    return rho
def energy_elec(mf, dm_kpts=None, h1e_kpts=None, vhf=None):
    """Electronic energy: core Hamiltonian + Coulomb + XC, averaged over k-points.

    Returns a tuple (total electronic energy, Ecoul + Exc).
    """
    if h1e_kpts is None: h1e_kpts = mf.get_hcore(mf.cell, mf.kpts)
    if dm_kpts is None: dm_kpts = mf.make_rdm1()
    # Recompute the effective potential if ecoul/exc are not attached to vhf.
    if vhf is None or getattr(vhf, 'ecoul', None) is None:
        vhf = mf.METHOD_NAME(mf.cell, dm_kpts)

    # Equal weight per k-point.
    weight = 1./len(h1e_kpts)
    e1 = weight * np.einsum('kij,kji', h1e_kpts, dm_kpts)
    tot_e = e1 + vhf.ecoul + vhf.exc
    mf.scf_summary['e1'] = e1.real
    mf.scf_summary['coul'] = vhf.ecoul.real
    mf.scf_summary['exc'] = vhf.exc.real
    logger.debug(mf, 'E1 = %s  Ecoul = %s  Exc = %s', e1, vhf.ecoul, vhf.exc)
    return tot_e.real, vhf.ecoul + vhf.exc
class KRKS(khf.KRHF, rks.KohnShamDFT):
    '''RKS class adapted for PBCs with k-point sampling.
    '''
    def __init__(self, cell, kpts=np.zeros((1,3)), xc='LDA,VWN',
                 exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald')):
        # Initialize the Hartree-Fock machinery first, then layer DFT on top.
        khf.KRHF.__init__(self, cell, kpts, exxdiv=exxdiv)
        rks.KohnShamDFT.__init__(self, xc)

    def dump_flags(self, verbose=None):
        khf.KRHF.dump_flags(self, verbose)
        rks.KohnShamDFT.dump_flags(self, verbose)
        return self

    # Bind the module-level implementations as methods.
    METHOD_NAME = METHOD_NAME
    energy_elec = energy_elec
    get_rho = get_rho

    # Density-fitting constructors, patched to default to Becke grids.
    density_fit = rks._patch_df_beckegrids(khf.KRHF.density_fit)
    rs_density_fit = rks._patch_df_beckegrids(khf.KRHF.rs_density_fit)
    mix_density_fit = rks._patch_df_beckegrids(khf.KRHF.mix_density_fit)

    def nuc_grad_method(self):
        """Return the nuclear gradients driver for this method."""
        from pyscf.pbc.grad import krks
        return krks.Gradients(self)
if __name__ == '__main__':
    # Minimal smoke test: diamond primitive cell with a 2x1x1 k-point mesh.
    from pyscf.pbc import gto
    cell = gto.Cell()
    cell.unit = 'A'
    cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
    cell.a = '''0. 1.7834 1.7834
1.7834 0. 1.7834
1.7834 1.7834 0. '''
    cell.basis = 'gth-szv'
    cell.pseudo = 'gth-pade'
    cell.verbose = 7
    cell.output = '/dev/null'
    cell.build()
    mf = KRKS(cell, cell.make_kpts([2,1,1]))
    print(mf.kernel())
6,390 | test model identifiers set globally | #
# Copyright (C) 2022
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""While frowned-upon in polite Python society, many Sherpa users
will be using the
from sherpa.astro.ui import *
form, either drectly or indirectly (when using the sherpa application
in CIAO). So this adds some basic tests of this behavior, as there are
a few "interesting" things to review.
"""
import sys
import pytest
from sherpa.astro.ui import *
from sherpa.astro.ui.utils import Session
import sherpa.models.basic
from sherpa.ui.utils import ModelWrapper
# Select models from sherpa.models.basic and sherpa.astro.models
#
MODEL_NAMES = ["const1d", "gauss2d", "lorentz1d"]
def test_secret_session_is_created():
    """This is a regression test.

    The sherpa.ui module does not export _session but astro does.
    """
    # _session is provided by the star import of sherpa.astro.ui above.
    assert isinstance(_session, Session)
@pytest.mark.parametrize("model", MODEL_NAMES)
def test_model_is_known(model):
    """Check the sampled model names are registered with the session."""
    assert model in list_models()
@pytest.mark.parametrize("model", MODEL_NAMES)
def test_model_is_defined(model):
    """Check the star import exposed each model as a ModelWrapper symbol."""
    assert model in globals()
    sym = globals()[model]
    assert isinstance(sym, ModelWrapper)
@pytest.mark.parametrize("as_string", [True, False])
def METHOD_NAME(as_string):
    """Check we create a global symbol for the models.

    Exercises both the string form and the expression form of
    set_source; either way the model identifiers should be injected
    into the __main__ namespace, not this module's.

    See also the same test in
    sherpa/astro/ui/tests/test_astro_session.py
    sherpa/astro/ui/tests/test_astro_ui_unit.py
    """

    # The "global" symbol table depends on what has been run before. We
    # could try and make sure that we are "clean", but this makes checking
    # what this test is doing hard to do, so we remove the symbols just
    # in case.
    #
    for name in ["mdl1", "mdl2"]:
        try:
            del sys.modules["__main__"].__dict__[name]
        except KeyError:
            pass

    dataspace1d(1, 10, 1)
    # Neither identifier should exist anywhere yet.
    for store in [globals(), locals(), sys.modules["__main__"].__dict__]:
        assert "mdl1" not in store
        assert "mdl2" not in store

    if as_string:
        set_source("const1d.mdl1 + gauss1d.mdl2")
    else:
        set_source(const1d.mdl1 + gauss1d.mdl2)

    # The identifiers are NOT added to this module's namespace ...
    for store in [globals(), locals()]:
        assert "mdl1" not in store
        assert "mdl2" not in store

    # ... they are injected into __main__ instead.
    assert "mdl1" in sys.modules["__main__"].__dict__
    assert "mdl2" in sys.modules["__main__"].__dict__

    assert isinstance(sys.modules["__main__"].__dict__["mdl1"],
                      sherpa.models.basic.Const1D)
    assert isinstance(sys.modules["__main__"].__dict__["mdl2"],
                      sherpa.models.basic.Gauss1D)

    clean()
def test_delete_model_removes_global_identifier():
    """Check deleting a model component also removes its global symbol."""
    create_model_component("const1d", "mdl1")
    create_model_component("gauss1d", "mdl2")

    assert "mdl1" in sys.modules["__main__"].__dict__
    assert "mdl2" in sys.modules["__main__"].__dict__

    delete_model_component("mdl1")
    delete_model_component("mdl2")

    # The __main__ symbols should have been cleaned up with the components.
    assert "mdl1" not in sys.modules["__main__"].__dict__
    assert "mdl2" not in sys.modules["__main__"].__dict__
def test_what_happens_if_the_same_identifier_is_reused():
    """Regression test.

    It's not obvious what we want to do here.
    """

    # This could error out or set the "first" name as the winner, or
    # the "last", or something else. Test the current behavior.
    #
    combined = gauss1d.bob + const1d.bob
    # Currently the last component created with the name wins.
    assert isinstance(bob, sherpa.models.basic.Const1D)

    delete_model_component("bob")
6,391 | ingest | from typing import Dict, List, Any, Callable, Optional, Union
import numpy as np
import deeplake
from deeplake.core.dataset import Dataset as DeepLakeDataset
from deeplake.core.vectorstore.vector_search import utils
from deeplake.util.exceptions import (
TransformError,
FailedIngestionError,
IncorrectEmbeddingShapeError,
)
from deeplake.constants import (
MAX_VECTORSTORE_INGESTION_RETRY_ATTEMPTS,
MAX_CHECKPOINTING_INTERVAL,
)
import sys
from deeplake.constants import MAX_BYTES_PER_MINUTE, TARGET_BYTE_SIZE
class DataIngestion:
    """Batches samples and ingests them into a Deep Lake dataset.

    On failure, ingestion resumes from the last committed checkpoint and
    retries up to MAX_VECTORSTORE_INGESTION_RETRY_ATTEMPTS times.
    """

    def __init__(
        self,
        elements: List[Dict[str, Any]],
        dataset: DeepLakeDataset,
        embedding_function: Optional[List[Callable]],
        embedding_tensor: Optional[List[str]],
        ingestion_batch_size: int,
        num_workers: int,
        retry_attempt: int,
        total_samples_processed: int,
        logger,
    ):
        """Store ingestion configuration.

        :param elements: one dict per sample, keyed by tensor name
        :param dataset: target Deep Lake dataset
        :param embedding_function: optional list of embedding callables,
            applied pairwise with embedding_tensor
        :param embedding_tensor: tensor names to receive the embeddings
        :param ingestion_batch_size: samples per transform batch
        :param num_workers: parallel transform workers
        :param retry_attempt: retries already performed (carried across
            re-ingestion attempts)
        :param total_samples_processed: samples already committed, used to
            resume after a checkpoint
        :param logger: optional logger for progress warnings
        """
        self.elements = elements
        self.dataset = dataset
        self.embedding_function = embedding_function
        self.ingestion_batch_size = ingestion_batch_size
        self.num_workers = num_workers
        self.retry_attempt = retry_attempt
        self.total_samples_processed = total_samples_processed
        self.embedding_tensor = embedding_tensor
        self.logger = logger

    def collect_batched_data(self, ingestion_batch_size=None):
        """Split the not-yet-processed elements into equally sized batches.

        :param ingestion_batch_size: optional override of the instance value
        :return: list of lists of sample dicts
        :raises ValueError: if there are no elements to batch
        """
        ingestion_batch_size = ingestion_batch_size or self.ingestion_batch_size
        batch_size = min(ingestion_batch_size, len(self.elements))
        if batch_size == 0:
            raise ValueError("batch_size must be a positive number greater than zero.")

        elements = self.elements
        if self.total_samples_processed:
            # Resuming after a checkpoint: skip already-uploaded samples.
            elements = self.elements[self.total_samples_processed :]

        batched = [
            elements[i : i + batch_size] for i in range(0, len(elements), batch_size)
        ]

        if self.logger:
            batch_upload_str = f"Batch upload: {len(elements)} samples are being uploaded in {len(batched)} batches of batch size {batch_size}"
            if self.total_samples_processed:
                batch_upload_str = (
                    f"Batch reupload: {len(self.elements)-len(elements)} samples already uploaded, while "
                    f"{len(elements)} samples are being uploaded in {len(batched)} batches of batch size {batch_size}"
                )
            self.logger.warning(batch_upload_str)
        return batched

    def get_num_workers(self, batched):
        """Return the worker count to use for the transform.

        NOTE(review): this caps the workers by
        len(batched) // max(self.num_workers, 1) rather than by
        len(batched) — confirm the formula is intended and not a typo for
        min(self.num_workers, len(batched)).
        """
        return min(self.num_workers, len(batched) // max(self.num_workers, 1))

    def get_checkpoint_interval_and_batched_data(self, batched, num_workers):
        """Compute how many batches to process between commits.

        NOTE(review): despite the name, only the checkpoint interval is
        returned; the batched data is not modified or returned.
        """
        # Roughly 10% of the batches, rounded to a multiple of num_workers,
        # with a floor of max(num_workers, 1).
        checkpoint_interval = max(
            int(
                (0.1 * len(batched) // max(num_workers, 1)) * max(num_workers, 1),
            ),
            num_workers,
            1,
        )

        # Cap the number of samples between checkpoints.
        if checkpoint_interval * self.ingestion_batch_size > MAX_CHECKPOINTING_INTERVAL:
            checkpoint_interval = 100

        return checkpoint_interval

    def run(self):
        """Batch the data and launch the (possibly retried) ingestion."""
        batched_data = self.collect_batched_data()
        num_workers = self.get_num_workers(batched_data)
        checkpoint_interval = self.get_checkpoint_interval_and_batched_data(
            batched_data, num_workers=num_workers
        )

        self.METHOD_NAME(
            batched=batched_data,
            num_workers=num_workers,
            checkpoint_interval=checkpoint_interval,
        )

    def METHOD_NAME(
        self,
        batched,
        num_workers,
        checkpoint_interval,
    ):
        """Run the deeplake transform; on failure resume from the last checkpoint.

        :param batched: list of batches from collect_batched_data
        :param num_workers: transform worker count
        :param checkpoint_interval: batches between commits
        :raises IncorrectEmbeddingShapeError: when embeddings have mixed shapes
        :raises FailedIngestionError: when the retry budget is exhausted
        """
        try:
            ingest(
                embedding_function=self.embedding_function,
                embedding_tensor=self.embedding_tensor,
            ).eval(
                batched,
                self.dataset,
                num_workers=num_workers,
                checkpoint_interval=checkpoint_interval,
                verbose=False,
            )
        except Exception as e:
            # Shape mismatches are not retryable; surface them immediately.
            if isinstance(e.__cause__, IncorrectEmbeddingShapeError):
                raise IncorrectEmbeddingShapeError()

            self.retry_attempt += 1
            # Account for the samples the last committed checkpoint covers,
            # so the retry resumes from there.
            last_checkpoint = self.dataset.version_state["commit_node"].parent
            self.total_samples_processed += (
                last_checkpoint.total_samples_processed * self.ingestion_batch_size
            )
            index = int(self.total_samples_processed / self.ingestion_batch_size)
            if isinstance(e, TransformError) and e.index is not None:
                index += e.index
            if self.retry_attempt > MAX_VECTORSTORE_INGESTION_RETRY_ATTEMPTS:
                raise FailedIngestionError(
                    f"Ingestion failed at batch index {index}. Maximum retry attempts exceeded. You can resume ingestion "
                    "from the latest saved checkpoint.\n"
                    "To do that you should run:\n"
                    "```\n"
                    "deeplake_vector_store.add(\n"
                    " texts=texts,\n"
                    " metadatas=metadatas,\n"
                    " ids=ids,\n"
                    " embeddings=embeddings,\n"
                    f" total_samples_processed={self.total_samples_processed},\n"
                    ")\n"
                    "```"
                )

            # Retry with a fresh DataIngestion carrying the updated counters.
            data_ingestion = DataIngestion(
                elements=self.elements,
                dataset=self.dataset,
                embedding_function=self.embedding_function,
                ingestion_batch_size=self.ingestion_batch_size,
                num_workers=num_workers,
                retry_attempt=self.retry_attempt,
                total_samples_processed=self.total_samples_processed,
                logger=self.logger,
                embedding_tensor=self.embedding_tensor,
            )
            data_ingestion.run()
@deeplake.compute
def ingest(
    sample_in: list,
    sample_out: list,
    embedding_function,
    embedding_tensor,
) -> None:
    """Transform worker: optionally embed each sample and append it to sample_out.

    :param sample_in: batch of sample dicts keyed by tensor name
    :param sample_out: transform output collector
    :param embedding_function: optional list of embedding callables, applied
        pairwise with embedding_tensor
    :param embedding_tensor: tensor names that receive the embeddings
    :raises Exception: if an embedding function fails (original error chained)
    :raises IncorrectEmbeddingShapeError: if embeddings have mixed shapes
    """
    embeds: List[Optional[np.ndarray]] = [None] * len(sample_in)
    if embedding_function:
        try:
            for func, tensor in zip(embedding_function, embedding_tensor):
                embedding_data = [s[tensor] for s in sample_in]
                embeddings = func(embedding_data)
        except Exception as exc:
            # Chain the original error so the root cause stays visible in the
            # traceback and in e.__cause__ (the caller inspects it).
            raise Exception(
                "Could not use embedding function. Please try again with a different embedding function."
            ) from exc

        # All embeddings must share the shape of the first one.
        shape = np.array(embeddings[0]).shape

        embeds = []
        for e in embeddings:
            embedding = np.array(e, dtype=np.float32)
            if shape != embedding.shape:
                raise IncorrectEmbeddingShapeError()
            embeds.append(embedding)

    for s, emb in zip(sample_in, embeds):
        sample_in_i = {tensor_name: s[tensor_name] for tensor_name in s}

        if embedding_function:
            for tensor in embedding_tensor:
                sample_in_i[tensor] = np.array(emb, dtype=np.float32)

        sample_out.append(sample_in_i)
6,392 | list workspace subscription secrets | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListWorkspaceSubscriptionSecretsResult',
'AwaitableListWorkspaceSubscriptionSecretsResult',
'list_workspace_subscription_secrets',
'list_workspace_subscription_secrets_output',
]
@pulumi.output_type
class ListWorkspaceSubscriptionSecretsResult:
    """
    Subscription keys.
    """
    def __init__(__self__, primary_key=None, secondary_key=None):
        # Generated output type: validate and register each field with pulumi.
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[str]:
        """
        Subscription primary key.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[str]:
        """
        Subscription secondary key.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListWorkspaceSubscriptionSecretsResult(ListWorkspaceSubscriptionSecretsResult):
    """Awaitable wrapper so the result can be used with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        # Already resolved: return a plain result immediately.
        return ListWorkspaceSubscriptionSecretsResult(
            primary_key=self.primary_key,
            secondary_key=self.secondary_key)
def METHOD_NAME(resource_group_name: Optional[str] = None,
                service_name: Optional[str] = None,
                sid: Optional[str] = None,
                workspace_id: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWorkspaceSubscriptionSecretsResult:
    """
    Gets the specified Subscription keys.


    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_name: The name of the API Management service.
    :param str sid: Subscription entity Identifier. The entity represents the association between a user and a product in API Management.
    :param str workspace_id: Workspace identifier. Must be unique in the current API Management service instance.
    """
    # Marshal arguments and invoke the provider function synchronously.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    __args__['sid'] = sid
    __args__['workspaceId'] = workspace_id
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20230301preview:listWorkspaceSubscriptionSecrets', __args__, opts=opts, typ=ListWorkspaceSubscriptionSecretsResult).value

    return AwaitableListWorkspaceSubscriptionSecretsResult(
        primary_key=pulumi.get(__ret__, 'primary_key'),
        secondary_key=pulumi.get(__ret__, 'secondary_key'))
# Output-lifted variant: accepts pulumi Inputs and returns a pulumi Output.
# The body is supplied by the lift_output_func decorator; `...` is intentional.
@_utilities.lift_output_func(METHOD_NAME)
def list_workspace_subscription_secrets_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                                               service_name: Optional[pulumi.Input[str]] = None,
                                               sid: Optional[pulumi.Input[str]] = None,
                                               workspace_id: Optional[pulumi.Input[str]] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListWorkspaceSubscriptionSecretsResult]:
    """
    Gets the specified Subscription keys.


    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_name: The name of the API Management service.
    :param str sid: Subscription entity Identifier. The entity represents the association between a user and a product in API Management.
    :param str workspace_id: Workspace identifier. Must be unique in the current API Management service instance.
    """
    ...
6,393 | run single test | from __future__ import print_function
import http_utils
import time
# note: run once with moov cache enabled and once with moov cache disabled
# it's recommended to disable debug logs since this test generates a lot of log lines
'''
nginx.conf
location /local/content/ {
vod none;
vod_mode local;
alias /path/to/mp4/files/;
}
location /remote/content/ {
vod none;
vod_mode remote;
vod_upstream backend;
}
location /local/mp4/content {
mp4;
alias /path/to/mp4/files/;
}
the following changes to ngx_http_mp4_module are required for a successful comparison
in ngx_http_mp4_handler replace
ngx_set_errno(0);
start = (int) (strtod((char *) value.data, NULL) * 1000);
if (ngx_errno != 0) {
start = -1;
}
with
start = ngx_atofp(value.data, value.len, 3);
in ngx_http_mp4_handler replace
ngx_set_errno(0);
end = (int) (strtod((char *) value.data, NULL) * 1000);
if (ngx_errno != 0) {
end = -1;
}
with
end = ngx_atofp(value.data, value.len, 3);
in ngx_http_mp4_update_mdat_atom replace
atom_data_size = end_offset - start_offset;
mp4->mdat_data.buf->file_pos = start_offset;
mp4->mdat_data.buf->file_last = end_offset;
with
if (start_offset >= end_offset)
{
atom_data_size = 0;
mp4->mdat_data.buf->in_file = 0;
}
else
{
atom_data_size = end_offset - start_offset;
mp4->mdat_data.buf->file_pos = start_offset;
mp4->mdat_data.buf->file_last = end_offset;
}
in ngx_http_mp4_update_stts_atom after
if (ngx_http_mp4_crop_stts_data(mp4, trak, 0) != NGX_OK) {
return NGX_ERROR;
}
add
if (trak->start_sample >= trak->end_sample)
return NGX_ERROR;
in ngx_http_mp4_crop_stsc_data replace
uint32_t start_sample, chunk, samples, id, next_chunk, n,
prev_samples;
with
uint32_t start_sample, chunk, samples = 0, id, next_chunk, n,
prev_samples;
in ngx_http_mp4_crop_stsc_data replace
chunk = ngx_mp4_get_32value(entry->chunk);
samples = ngx_mp4_get_32value(entry->samples);
id = ngx_mp4_get_32value(entry->id);
prev_samples = 0;
with
prev_samples = samples;
chunk = ngx_mp4_get_32value(entry->chunk);
samples = ngx_mp4_get_32value(entry->samples);
id = ngx_mp4_get_32value(entry->id);
'''
# Media file under test and its duration in milliseconds.
FILE_NAME = 'b.mp4'
FILE_DURATION = 728000
# Exercise both the remote (upstream) and local vod delivery modes.
FILE_BASES = ['remote', 'local']

# nginx-vod-module style URL: clipFrom/clipTo path segments (no "clip to
# end of file" form, hence noEndSupport).
URL1_FORMAT = {
    'prefix': 'http://localhost:8001/{fileBase}/content/',
    'start': 'clipFrom/%d%03d/',
    'end': 'clipTo/%d%03d/',
    'suffix': FILE_NAME,
    'noEndSupport': True,
}

# ngx_http_mp4_module style URL: start/end query parameters in seconds.
URL2_FORMAT = {
    'prefix': 'http://localhost:8001/local/mp4/content/%s?' % FILE_NAME,
    'start': 'start=%d.%03d&',
    'end': 'end=%d.%03d&',
    'suffix': '',
    'noEndSupport': False,
}

# Clip ranges to sweep: the first and last five seconds of the file.
TEST_SUITES = [
    {
        'min': 0,
        'max': 5000,
        'step': 25,
        'testNoEnd': False,
    },
    {
        'min': FILE_DURATION - 5000,
        'max': FILE_DURATION,
        'step': 25,
        'testNoEnd': True,
    },
]
def buildUrl(urlFormat, fileBase, start, end):
    """Build a clipping URL from a template dict.

    :param urlFormat: template dict with 'prefix', 'start', 'end', 'suffix'
        format strings and a 'noEndSupport' flag
    :param fileBase: value substituted for the '{fileBase}' placeholder
    :param start: clip start position in milliseconds (0 = start of file)
    :param end: clip end position in milliseconds (0 = end of file)
    :return: the formatted URL string
    """
    # Templates that cannot express "clip to end of file" get a huge
    # explicit end position instead.
    if not urlFormat['noEndSupport'] and end <= 0:
        end = 100000000

    result = urlFormat['prefix']
    if start > 0:
        # divmod splits milliseconds into (whole seconds, remainder) with
        # explicit integer semantics; the original `start / 1000` is true
        # division under Python 3 and only worked via %d's float truncation.
        result += urlFormat['start'] % divmod(start, 1000)
    if end > 0:
        result += urlFormat['end'] % divmod(end, 1000)
    result += urlFormat['suffix']
    return result.replace('{fileBase}', fileBase)
def getUrl(url):
    """Fetch a URL, log the timing, and return (status code, body).

    :param url: URL to request
    :return: tuple of (HTTP status code, response body); code 0 indicates a
        transport-level failure with the error text in the body
    """
    startTime = time.time()
    code, headers, body = http_utils.getUrl(url)
    print ('Info: get %s took %s' % (url, time.time() - startTime))
    if code == 0:
        # Transport error: print the error message for diagnosis.
        print(body)
    return (code, body)
def METHOD_NAME(fileBase, start, end):
    """Request the same clip through both URL formats and compare responses.

    :param fileBase: 'remote' or 'local' serving mode
    :param start: clip start in milliseconds
    :param end: clip end in milliseconds (0 = no explicit end)
    """
    # start == end == 0 means no clipping at all; nothing to compare.
    if start == 0 and end == 0:
        return

    url1 = buildUrl(URL1_FORMAT, fileBase, start, end)
    url2 = buildUrl(URL2_FORMAT, fileBase, start, end)
    print('curling %s' % url1)
    code1, data1 = getUrl(url1)
    print('curling %s' % url2)
    code2, data2 = getUrl(url2)
    if code1 != code2:
        # Both modules may legitimately reject an invalid clip range, just
        # with different status codes (400 vs 500) — not a mismatch.
        if set([code1, code2]) == set([400, 500]):
            return
        print('Error: different codes %s %s' % (code1, code2))
        return
    if data1 != data2:
        print('Error: %s %s' % (url1, url2))
def runTestSuite(fileBase, testCase):
    """Sweep every (start, end) clip combination defined by one suite entry."""
    lower = testCase['min']
    upper = testCase['max']
    step = testCase['step']

    clip_from = lower
    while clip_from < upper:
        clip_to = clip_from
        while clip_to < upper:
            # On the diagonal, optionally exercise the "no explicit end" form.
            use_no_end = testCase['testNoEnd'] and clip_from == clip_to
            METHOD_NAME(fileBase, clip_from, 0 if use_no_end else clip_to)
            clip_to += step
        clip_from += step
def runTestSuites(fileBase):
    """Run all configured test suites against one serving mode.

    :param fileBase: 'remote' or 'local'
    """
    for testCase in TEST_SUITES:
        runTestSuite(fileBase, testCase)
# Entry point: exercise every configured serving mode.
for fileBase in FILE_BASES:
    runTestSuites(fileBase)
6,394 | set up base | from __future__ import absolute_import
from builtins import str
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.test import TestCase
from django.test import TransactionTestCase
from django.urls import reverse_lazy
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from rest_framework.test import APIRequestFactory
from rest_framework.test import APITestCase
from rest_framework.test import force_authenticate
from . import testdata
from contentcuration.models import User
from contentcuration.utils import minio_utils
class BucketTestClassMixin(object):
    """Class-level helpers for creating/deleting the MinIO storage bucket."""

    @classmethod
    def create_bucket(cls):
        # will_sleep=False skips the startup wait so tests run faster.
        minio_utils.ensure_storage_bucket_public(will_sleep=False)

    @classmethod
    def delete_bucket(cls):
        minio_utils.ensure_bucket_deleted()
class BucketTestMixin:
    """
    Handles bucket setup and tear down for test classes. If you want your entire TestCase to share the same bucket,
    call create_bucket in setUpClass and then set persist_bucket to True, then make sure you call self.delete_bucket()
    in tearDownClass.
    """

    # When True, the bucket is managed at class level and per-test
    # setUp/tearDown leave it alone.
    persist_bucket = False

    @classmethod
    def create_bucket(cls):
        minio_utils.ensure_storage_bucket_public(will_sleep=False)

    @classmethod
    def delete_bucket(cls):
        minio_utils.ensure_bucket_deleted()

    def setUp(self):
        # NOTE(review): this unconditional raise makes the lines below
        # unreachable — it reads like a leftover debugging probe ("Called?").
        # Confirm whether setUp should actually create the bucket, or whether
        # subclasses are expected to always override setUp.
        raise Exception("Called?")
        if not self.persist_bucket:
            self.create_bucket()

    def tearDown(self):
        if not self.persist_bucket:
            self.delete_bucket()
class StudioTestCase(TestCase, BucketTestMixin):
    """Base Django TestCase with a storage bucket and an admin superuser."""

    @classmethod
    def setUpClass(cls):
        super(StudioTestCase, cls).setUpClass()
        call_command("loadconstants")
        cls.admin_user = User.objects.create_superuser(
            "big_shot", "bigshot@reallybigcompany.com", "password"
        )

    def setUp(self):
        if not self.persist_bucket:
            self.create_bucket()

    def METHOD_NAME(self):
        """Extended setup: bucket plus a test channel with an editor user."""
        if not self.persist_bucket:
            self.create_bucket()
        self.channel = testdata.channel()
        self.user = testdata.user()
        self.channel.editors.add(self.user)
        self.channel.main_tree.refresh_from_db()

    def tearDown(self):
        if not self.persist_bucket:
            self.delete_bucket()

    def admin_client(self):
        """Return an APIClient authenticated as the admin superuser."""
        client = APIClient()
        client.force_authenticate(self.admin_user)
        return client

    def upload_temp_file(self, data, preset="document", ext="pdf"):
        """
        Uploads a file to the server using an authorized client.
        """
        fileobj_temp = testdata.create_studio_file(data, preset=preset, ext=ext)
        name = fileobj_temp["name"]

        f = SimpleUploadedFile(name, data)
        file_upload_url = str(reverse_lazy("api_file_upload"))
        return fileobj_temp, self.admin_client().post(file_upload_url, {"file": f})

    def sign_in(self, user=None):
        """Log the test client in as the given user (default: self.user)."""
        if not user:
            user = self.user
        user.save()
        self.client.force_login(user)

    def get(self, url, data=None, follow=False, secure=False):
        # Always send a supported User-Agent so browser checks pass.
        return self.client.get(
            url,
            data=data,
            follow=follow,
            secure=secure,
            HTTP_USER_AGENT=settings.SUPPORTED_BROWSERS[0],
        )
class StudioAPITestCase(APITestCase, BucketTestMixin):
    """Base DRF APITestCase with per-test bucket setup/teardown."""

    @classmethod
    def setUpClass(cls):
        super(StudioAPITestCase, cls).setUpClass()
        call_command("loadconstants")

    def setUp(self):
        if not self.persist_bucket:
            self.create_bucket()

    def tearDown(self):
        if not self.persist_bucket:
            self.delete_bucket()

    def sign_in(self, user=None):
        """Log the test client in as the given user (default: self.user)."""
        if not user:
            user = self.user
        user.save()
        self.client.force_login(user)

    def get(self, url, data=None, follow=False, secure=False):
        # Always send a supported User-Agent so browser checks pass.
        return self.client.get(
            url,
            data=data,
            follow=follow,
            secure=secure,
            HTTP_USER_AGENT=settings.SUPPORTED_BROWSERS[0],
        )
class BaseAPITestCase(StudioAPITestCase):
    """API test base with a pre-built channel, editor user, and auth token."""

    def setUp(self):
        super(BaseAPITestCase, self).setUp()
        self.channel = testdata.channel()
        self.user = testdata.user()
        self.channel.editors.add(self.user)
        self.token, _new = Token.objects.get_or_create(user=self.user)
        self.client = APIClient()
        self.client.force_authenticate(
            self.user
        )  # This will skip all authentication checks
        self.channel.main_tree.refresh_from_db()

    def delete(self, url):
        return self.client.delete(url)

    def get(self, url):
        return self.client.get(url)

    def post(self, url, data, format="json"):
        return self.client.post(url, data, format=format)

    def put(self, url, data, format="json"):
        return self.client.put(url, data, format=format)

    def create_get_request(self, url, *args, **kwargs):
        """Build an authenticated GET request object (not sent)."""
        factory = APIRequestFactory()
        request = factory.get(url, *args, **kwargs)
        request.user = self.user
        force_authenticate(request, user=self.user)
        return request

    def create_post_request(self, url, *args, **kwargs):
        """Build an authenticated POST request object (not sent)."""
        factory = APIRequestFactory()
        request = factory.post(url, *args, **kwargs)
        request.user = self.user
        force_authenticate(request, user=self.user)
        return request
# Modified from https://www.caktusgroup.com/blog/2016/02/02/writing-unit-tests-django-migrations/
class MigrationTestCase(TransactionTestCase):
    """Base class for testing a single migration.

    Subclasses set `app`, `migrate_from`, and `migrate_to`, and may override
    setUpBeforeMigration(apps) to create data against the pre-migration state.
    """

    # Subclasses must provide these.
    migrate_from = None
    migrate_to = None
    app = None

    def setUp(self):
        assert (
            self.migrate_from and self.migrate_to
        ), "TestCase '{}' must define migrate_from and migrate_to properties".format(
            type(self).__name__
        )
        migrate_from = [(self.app, self.migrate_from)]
        migrate_to = [(self.app, self.migrate_to)]
        executor = MigrationExecutor(connection)
        old_apps = executor.loader.project_state(migrate_from).apps

        # Reverse to the original migration
        executor.migrate(migrate_from)

        self.setUpBeforeMigration(old_apps)

        # Run the migration to test
        executor = MigrationExecutor(connection)
        executor.loader.build_graph()  # reload.
        executor.migrate(migrate_to)

        self.apps = executor.loader.project_state(migrate_to).apps

    @classmethod
    def tearDownClass(cls):
        """
        Ensures that the DB is reset and fully migrated due to this
        test class's selective migrations
        """
        call_command("migrate")
6,395 | plot mcse | """Bokeh mcseplot."""
import numpy as np
from bokeh.models import ColumnDataSource, Span
from bokeh.models.glyphs import Scatter
from bokeh.models.annotations import Title
from scipy.stats import rankdata
from ....stats.stats_utils import quantile as _quantile
from ...plot_utils import _scale_fig_size
from .. import show_layout
from . import backend_kwarg_defaults, create_axes_grid
def METHOD_NAME(
    ax,
    plotters,
    length_plotters,
    rows,
    cols,
    figsize,
    errorbar,
    rug,
    data,
    probs,
    kwargs,  # pylint: disable=unused-argument
    extra_methods,
    mean_mcse,
    sd_mcse,
    textsize,
    labeller,
    text_kwargs,  # pylint: disable=unused-argument
    rug_kwargs,
    extra_kwargs,
    idata,
    rug_kind,
    backend_kwargs,
    show,
):
    """Bokeh mcse plot.

    Draws, per variable/selection, the MCSE of the `probs` quantiles
    (as points, or as error bars when `errorbar` is set), optionally with
    horizontal lines for the mean/sd MCSE (`extra_methods`) and a rug of
    divergence locations (`rug`). Returns the 2-D array of bokeh figures.
    """
    if backend_kwargs is None:
        backend_kwargs = {}

    # User-supplied backend kwargs win over the defaults.
    backend_kwargs = {
        **backend_kwarg_defaults(),
        **backend_kwargs,
    }

    (figsize, *_, _linewidth, _markersize) = _scale_fig_size(figsize, textsize, rows, cols)

    extra_kwargs = {} if extra_kwargs is None else extra_kwargs
    extra_kwargs.setdefault("linewidth", _linewidth / 2)
    extra_kwargs.setdefault("color", "black")
    extra_kwargs.setdefault("alpha", 0.5)

    if ax is None:
        ax = create_axes_grid(
            length_plotters,
            rows,
            cols,
            figsize=figsize,
            backend_kwargs=backend_kwargs,
        )
    else:
        ax = np.atleast_2d(ax)

    # Pair each (variable, selection) plotter with the next non-empty axis.
    for (var_name, selection, isel, x), ax_ in zip(
        plotters, (item for item in ax.flatten() if item is not None)
    ):
        if errorbar or rug:
            values = data[var_name].sel(**selection).values.flatten()
        if errorbar:
            # Error bars: quantile value +/- its MCSE at each probability.
            quantile_values = _quantile(values, probs)
            ax_.dash(probs, quantile_values)
            ax_.multi_line(
                list(zip(probs, probs)),
                [(quant - err, quant + err) for quant, err in zip(quantile_values, x)],
            )
        else:
            ax_.circle(probs, x)
        if extra_methods:
            # Horizontal reference lines for the mean and sd MCSE values.
            mean_mcse_i = mean_mcse[var_name].sel(**selection).values.item()
            sd_mcse_i = sd_mcse[var_name].sel(**selection).values.item()
            hline_mean = Span(
                location=mean_mcse_i,
                dimension="width",
                line_color=extra_kwargs["color"],
                line_width=extra_kwargs["linewidth"] * 2,
                line_alpha=extra_kwargs["alpha"],
            )

            ax_.renderers.append(hline_mean)

            hline_sd = Span(
                location=sd_mcse_i,
                dimension="width",
                line_color="black",
                line_width=extra_kwargs["linewidth"],
                line_alpha=extra_kwargs["alpha"],
            )

            ax_.renderers.append(hline_sd)

        if rug:
            if rug_kwargs is None:
                rug_kwargs = {}
            if not hasattr(idata, "sample_stats"):
                raise ValueError("InferenceData object must contain sample_stats for rug plot")
            if not hasattr(idata.sample_stats, rug_kind):
                raise ValueError(f"InferenceData does not contain {rug_kind} data")
            rug_kwargs.setdefault("space", 0.1)

            _rug_kwargs = {}
            _rug_kwargs.setdefault("size", 8)
            _rug_kwargs.setdefault("line_color", rug_kwargs.get("line_color", "black"))
            _rug_kwargs.setdefault("line_width", 1)
            _rug_kwargs.setdefault("line_alpha", 0.35)
            _rug_kwargs.setdefault("angle", np.pi / 2)

            # Keep only the (rank-transformed) draws flagged by rug_kind
            # (e.g. divergences), placed along the x axis by rank.
            mask = idata.sample_stats[rug_kind].values.flatten()
            values = rankdata(values, method="average")[mask]
            if errorbar:
                # Place the rug slightly below the smallest quantile value.
                rug_x, rug_y = (
                    values / (len(mask) - 1),
                    np.full_like(
                        values,
                        min(
                            0,
                            min(quantile_values)
                            - (max(quantile_values) - min(quantile_values)) * 0.05,
                        ),
                    ),
                )
                hline = Span(
                    location=min(
                        0,
                        min(quantile_values) - (max(quantile_values) - min(quantile_values)) * 0.05,
                    ),
                    dimension="width",
                    line_color="black",
                    line_width=_linewidth,
                    line_alpha=0.7,
                )
            else:
                rug_x, rug_y = (
                    values / (len(mask) - 1),
                    np.full_like(
                        values,
                        0,
                    ),
                )
                hline = Span(
                    location=0,
                    dimension="width",
                    line_color="black",
                    line_width=_linewidth,
                    line_alpha=0.7,
                )

            ax_.renderers.append(hline)

            glyph = Scatter(x="rug_x", y="rug_y", marker="dash", **_rug_kwargs)
            cds_rug = ColumnDataSource({"rug_x": np.asarray(rug_x), "rug_y": np.asarray(rug_y)})
            ax_.add_glyph(cds_rug, glyph)

        title = Title()
        title.text = labeller.make_label_vert(var_name, selection, isel)
        ax_.title = title
        ax_.xaxis.axis_label = "Quantile"
        ax_.yaxis.axis_label = (
            r"Value $\pm$ MCSE for quantiles" if errorbar else "MCSE for quantiles"
        )
        if not errorbar:
            # Pin the y range so MCSE-only plots share a common scale.
            ax_.y_range._property_values["start"] = -0.05  # pylint: disable=protected-access
            ax_.y_range._property_values["end"] = 1  # pylint: disable=protected-access

    show_layout(ax, show)

    return ax
6,396 | tear down | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import time
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import SpgwUtil
from s1ap_utils import MagmadUtil
class Test3495TimerForDedicatedBearerWithMmeRestart(unittest.TestCase):
    """Test case validates the functionality of 3495 timer for
    dedicated bearer while MME restarts
    """

    def setUp(self):
        """Initialize"""
        # Stateless mode so MME state survives the mid-test service restart.
        self._s1ap_wrapper = s1ap_wrapper.TestWrapper(
            stateless_mode=MagmadUtil.stateless_cmds.ENABLE,
        )
        self._spgw_util = SpgwUtil()

    def METHOD_NAME(self):
        """Cleanup"""
        self._s1ap_wrapper.cleanup()

    def test_3495_timer_for_dedicated_bearer_with_mme_restart(self):
        """Test case validates the functionality of 3495 timer for
        dedicated bearer while MME restarts

        Step1: UE attaches to network
        Step2: Creates a dedicated bearer
        Step3: Initiates dedicated bearer deletion, as part of which mme sends
        deactivate EPS bearer context request and starts 3495 timer
        Step4: TFW shall not respond to first Deactivate EPS bearer context
        request message
        Step5: Send command to Magma to restart mme service
        Step6: TFW shall receive re-transmitted Deactivate EPS bearer context
        request message and send Deactivate EPS bearer Context Accept
        Step7: TFW shall initiate Detach procedure.
        """
        num_ues = 1
        self._s1ap_wrapper.configUEDevice(num_ues)

        for i in range(num_ues):
            req = self._s1ap_wrapper.ue_req
            print(
                "********************** Running End to End attach for ",
                "UE id ",
                req.ue_id,
            )
            # Now actually complete the attach
            attach = self._s1ap_wrapper._s1_util.attach(
                req.ue_id,
                s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
                s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
                s1ap_types.ueAttachAccept_t,
            )
            # Decode the UE's assigned IPv4 address from the PDN address info.
            addr = attach.esmInfo.pAddr.addrInfo
            default_ip = ipaddress.ip_address(bytes(addr[:4]))

            # Wait on EMM Information from MME
            self._s1ap_wrapper._s1_util.receive_emm_info()

            print("Sleeping for 5 seconds")
            time.sleep(5)
            print(
                "********************** Adding dedicated bearer to IMSI",
                "".join([str(i) for i in req.imsi]),
            )
            # Create default flow list
            flow_list = self._spgw_util.create_default_ipv4_flows()
            self._spgw_util.create_bearer(
                "IMSI" + "".join([str(i) for i in req.imsi]),
                attach.esmInfo.epsBearerId,
                flow_list,
            )

            response = self._s1ap_wrapper.s1_util.get_response()
            assert response.msg_type == s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
            act_ded_ber_ctxt_req = response.cast(
                s1ap_types.UeActDedBearCtxtReq_t,
            )
            self._s1ap_wrapper.sendActDedicatedBearerAccept(
                req.ue_id, act_ded_ber_ctxt_req.bearerId,
            )

            print("Sleeping for 5 seconds")
            time.sleep(5)
            # Verify if flow rules are created
            dl_flow_rules = {
                default_ip: [flow_list],
            }
            # 1 UL flow for default bearer + 1 for dedicated bearer
            num_ul_flows = 2
            self._s1ap_wrapper.s1_util.verify_flow_rules(
                num_ul_flows, dl_flow_rules,
            )

            print(
                "********************** Deleting dedicated bearer for IMSI",
                "".join([str(i) for i in req.imsi]),
            )
            self._spgw_util.delete_bearer(
                "IMSI" + "".join([str(i) for i in req.imsi]),
                attach.esmInfo.epsBearerId,
                act_ded_ber_ctxt_req.bearerId,
            )

            response = self._s1ap_wrapper.s1_util.get_response()
            assert response.msg_type == s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value

            print("******************* Received deactivate eps bearer context")
            # Do not send deactivate eps bearer context accept
            # (lets the MME's 3495 timer run while the service restarts).
            print(
                "************************* Restarting MME service on",
                "gateway",
            )
            wait_for_restart = 30
            self._s1ap_wrapper.magmad_util.restart_services(
                ["mme"], wait_for_restart,
            )

            # The restarted MME should re-transmit the deactivate request.
            response = self._s1ap_wrapper.s1_util.get_response()
            assert response.msg_type == s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value
            deactv_bearer_req = response.cast(s1ap_types.UeDeActvBearCtxtReq_t)
            self._s1ap_wrapper.sendDeactDedicatedBearerAccept(
                req.ue_id, deactv_bearer_req.bearerId,
            )

            print("Sleeping for 5 seconds")
            time.sleep(5)
            # Verify if flow rules are deleted for dedicated bearer
            dl_flow_rules = {
                default_ip: [],
            }
            # 1 UL flow for default bearer
            num_ul_flows = 1
            self._s1ap_wrapper.s1_util.verify_flow_rules(
                num_ul_flows, dl_flow_rules,
            )

            print(
                "********************** Running UE detach for UE id ",
                req.ue_id,
            )
            # Now detach the UE
            self._s1ap_wrapper.s1_util.detach(
                req.ue_id,
                s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value,
                False,
            )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
6,397 | get web app domain ownership identifier | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebAppDomainOwnershipIdentifierResult',
'AwaitableGetWebAppDomainOwnershipIdentifierResult',
'get_web_app_domain_ownership_identifier',
'get_web_app_domain_ownership_identifier_output',
]
@pulumi.output_type
class GetWebAppDomainOwnershipIdentifierResult:
    """
    A domain specific resource identifier.
    """
    def __init__(__self__, id=None, kind=None, name=None, type=None):
        # Generated output type: validate and register each field with pulumi.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetWebAppDomainOwnershipIdentifierResult(GetWebAppDomainOwnershipIdentifierResult):
    """Result type that can also be awaited, for async-style callers."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The `if False: yield` makes __await__ a generator function without
        # ever yielding, so `await result` completes immediately and returns
        # a plain GetWebAppDomainOwnershipIdentifierResult copy.
        if False:
            yield self
        return GetWebAppDomainOwnershipIdentifierResult(
            id=self.id,
            kind=self.kind,
            name=self.name,
            type=self.type)
def METHOD_NAME(domain_ownership_identifier_name: Optional[str] = None,
                name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppDomainOwnershipIdentifierResult:
    """
    Get domain ownership identifier for web app.

    :param str domain_ownership_identifier_name: Name of domain ownership identifier.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :return: The domain ownership identifier, wrapped so it may also be awaited.
    """
    # Marshal the arguments into the camelCase wire format expected by the
    # azure-native provider.
    __args__ = dict()
    __args__['domainOwnershipIdentifierName'] = domain_ownership_identifier_name
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    # Caller-supplied invoke options take precedence over package defaults.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20181101:getWebAppDomainOwnershipIdentifier', __args__, opts=opts, typ=GetWebAppDomainOwnershipIdentifierResult).value
    return AwaitableGetWebAppDomainOwnershipIdentifierResult(
        id=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_web_app_domain_ownership_identifier_output(domain_ownership_identifier_name: Optional[pulumi.Input[str]] = None,
                                                   name: Optional[pulumi.Input[str]] = None,
                                                   resource_group_name: Optional[pulumi.Input[str]] = None,
                                                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebAppDomainOwnershipIdentifierResult]:
    """
    Get domain ownership identifier for web app.

    :param str domain_ownership_identifier_name: Name of domain ownership identifier.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke above
    # so this variant accepts pulumi Inputs and returns an Output instead.
    ...
6,398 | test process custom event | import unittest
from unittest import mock
from queue import Queue
from flumine.controls.loggingcontrols import LoggingControl, EventType
class TestLoggingControl(unittest.TestCase):
    """Unit tests for LoggingControl: queue lifecycle, event-type routing
    in process_event(), and the default no-op handler stubs."""

    def setUp(self):
        self.logging_control = LoggingControl()

    def _assert_dispatched(self, event_type, handler_mock):
        # Helper: an event carrying `event_type` must be routed by
        # process_event() to the patched handler, passed through unchanged.
        mock_event = mock.Mock()
        mock_event.EVENT_TYPE = event_type
        self.logging_control.process_event(mock_event)
        handler_mock.assert_called_with(mock_event)

    def test_init(self):
        self.assertIsInstance(self.logging_control.logging_queue, Queue)
        self.assertEqual(self.logging_control.cache, [])
        self.assertEqual(self.logging_control.NAME, "LOGGING_CONTROL")

    def test_run(self):
        # A None sentinel on the queue terminates run() cleanly.
        self.logging_control.logging_queue.put(None)
        self.logging_control.run()

    def test_run_error(self):
        # An unprocessable item (int has no EVENT_TYPE) must not crash run();
        # the following None sentinel still terminates the loop.
        self.logging_control.logging_queue.put(1)
        self.logging_control.logging_queue.put(None)
        self.logging_control.run()

    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_config")
    def test_process_event_config(self, mock_process_config):
        self._assert_dispatched(EventType.CONFIG, mock_process_config)

    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_strategy")
    def test_process_event_strategy(self, mock_process_strategy):
        self._assert_dispatched(EventType.STRATEGY, mock_process_strategy)

    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_market")
    def test_process_event_market(self, mock_process_market):
        self._assert_dispatched(EventType.MARKET, mock_process_market)

    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_trade")
    def test_process_event_trade(self, mock_process_trade):
        self._assert_dispatched(EventType.TRADE, mock_process_trade)

    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_order")
    def test_process_event_order(self, mock_process_order):
        self._assert_dispatched(EventType.ORDER, mock_process_order)

    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_balance")
    def test_process_event_balance(self, mock_process_balance):
        self._assert_dispatched(EventType.BALANCE, mock_process_balance)

    @mock.patch(
        "flumine.controls.loggingcontrols.LoggingControl._process_cleared_orders"
    )
    def test_process_event_cleared(self, mock_process_cleared_orders):
        self._assert_dispatched(EventType.CLEARED_ORDERS, mock_process_cleared_orders)

    @mock.patch(
        "flumine.controls.loggingcontrols.LoggingControl._process_cleared_orders_meta"
    )
    def test_process_event_cleared_orders_meta(self, mock_process_cleared_orders_meta):
        self._assert_dispatched(
            EventType.CLEARED_ORDERS_META, mock_process_cleared_orders_meta
        )

    @mock.patch(
        "flumine.controls.loggingcontrols.LoggingControl._process_cleared_markets"
    )
    def test_process_event_cleared_markets(self, mock_process_cleared_markets):
        self._assert_dispatched(EventType.CLEARED_MARKETS, mock_process_cleared_markets)

    @mock.patch(
        "flumine.controls.loggingcontrols.LoggingControl._process_closed_market"
    )
    def test_process_event_closed(self, mock_process_closed_market):
        self._assert_dispatched(EventType.CLOSE_MARKET, mock_process_closed_market)

    # Fixed copy-paste: the mock parameter was previously named
    # `_closed_market` although it patches _process_custom_event.
    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_custom_event")
    def test_process_event_custom_event(self, mock_process_custom_event):
        self._assert_dispatched(EventType.CUSTOM_EVENT, mock_process_custom_event)

    @mock.patch("flumine.controls.loggingcontrols.LoggingControl._process_end_flumine")
    def test_process_event_end(self, mock_process_end_flumine):
        self._assert_dispatched(EventType.TERMINATOR, mock_process_end_flumine)
        # A terminator event also pushes the shutdown sentinel (None)
        # onto the logging queue.
        self.assertIsNone(self.logging_control.logging_queue.get())

    # The base-class handlers are no-op stubs; each must accept any
    # argument (None here) without raising.
    def test_process_config(self):
        self.logging_control._process_config(None)

    def test_process_strategy(self):
        self.logging_control._process_strategy(None)

    def test_process_market(self):
        self.logging_control._process_market(None)

    def test_process_trade(self):
        self.logging_control._process_trade(None)

    def test_process_order(self):
        self.logging_control._process_order(None)

    def test_process_balance(self):
        self.logging_control._process_balance(None)

    def test_process_cleared_orders(self):
        self.logging_control._process_cleared_orders(None)

    def test_process_cleared_orders_meta(self):
        self.logging_control._process_cleared_orders_meta(None)

    def test_process_cleared_markets(self):
        self.logging_control._process_cleared_markets(None)

    def test_process_closed_market(self):
        self.logging_control._process_closed_market(None)

    def METHOD_NAME(self):
        self.logging_control._process_custom_event(None)

    def test_process_end_flumine(self):
        self.logging_control._process_end_flumine(None)
6,399 | package info | import os
import conan.tools.files
from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import textwrap
required_conan_version = ">=1.29.1"
class IgnitionMathConan(ConanFile):
    """Conan (v1 API) recipe packaging the Ignition/Gazebo math library."""
    name = "ignition-math"
    license = "Apache-2.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://gazebosim.org/libs/math"
    description = " Math classes and functions for robot applications"
    topics = ("ignition", "math", "robotics", "gazebo")
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}
    generators = "cmake", "cmake_find_package_multi"
    exports_sources = "CMakeLists.txt", "patches/**"
    # Lazily-created CMake helper, cached by _configure_cmake().
    _cmake = None
    @property
    def _minimum_cpp_standard(self):
        # Upstream requires C++17.
        return 17
    @property
    def _minimum_compilers_version(self):
        # Oldest compiler versions with the C++17 support this recipe needs;
        # compilers missing from this map only trigger a warning (see configure).
        return {
            "Visual Studio": "16",
            "gcc": "7",
            "clang": "5",
            "apple-clang": "10",
        }
    @property
    def _source_subfolder(self):
        return "source_subfolder"
    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC
    def configure(self):
        # fPIC is implied for shared builds.
        if self.options.shared:
            del self.options.fPIC
        # Enforce the C++ standard only when the profile declares cppstd.
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, self._minimum_cpp_standard)
        min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
        if not min_version:
            # Unknown compiler: warn but do not block the build.
            self.output.warn(
                "{} recipe lacks information about the {} compiler support.".format(
                    self.name, self.settings.compiler
                )
            )
        else:
            if tools.Version(self.settings.compiler.version) < min_version:
                raise ConanInvalidConfiguration(
                    "{} requires c++17 support. The current compiler {} {} does not support it.".format(
                        self.name,
                        self.settings.compiler,
                        self.settings.compiler.version,
                    )
                )
    def requirements(self):
        self.requires("eigen/3.3.9")
        # NOTE(review): doxygen and swig look like build-time tools, so they
        # would normally go in build_requirements(); however package_info()
        # below re-exports them as component requires, so moving them would
        # change the package contract — confirm before touching.
        self.requires("doxygen/1.8.17")
        self.requires("swig/4.0.2")
    def build_requirements(self):
        # Newer upstream minor versions need a newer ignition-cmake.
        if int(tools.Version(self.version).minor) <= 8:
            self.build_requires("ignition-cmake/2.5.0")
        else:
            self.build_requires("ignition-cmake/2.10.0")
    def source(self):
        tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
    def _configure_cmake(self):
        # Configure once and cache; later calls reuse the same CMake object.
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["BUILD_TESTING"] = False
        self._cmake.configure()
        return self._cmake
    def build(self):
        # Apply conandata patches (if any) before building.
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmake = self._configure_cmake()
        cmake.build()
    def package(self):
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        # Drop files that conan-center packages must not ship
        # (pkg-config / CMake config files are regenerated by consumers).
        tools.rmdir(os.path.join(self.package_folder, "share"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        self._create_cmake_module_variables(
            os.path.join(self.package_folder, self._module_file_rel_path),
            tools.Version(self.version))
        # Remove MS runtime files
        for dll_pattern_to_remove in ["concrt*.dll", "msvcp*.dll", "vcruntime*.dll"]:
            tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), dll_pattern_to_remove)
    @staticmethod
    def _create_cmake_module_variables(module_file, version):
        # Emit the ignition-mathN_* variables that upstream's CMake config
        # would normally provide, so find_package() consumers keep working.
        content = textwrap.dedent("""\
            set(ignition-math{major}_VERSION_MAJOR {major})
            set(ignition-math{major}_VERSION_MINOR {minor})
            set(ignition-math{major}_VERSION_PATCH {patch})
            set(ignition-math{major}_VERSION_STRING "{major}.{minor}.{patch}")
            set(ignition-math{major}_INCLUDE_DIRS "${{CMAKE_CURRENT_LIST_DIR}}/../../include/ignition/math{major}")
        """.format(major=version.major, minor=version.minor, patch=version.patch))
        tools.save(module_file, content)
    def METHOD_NAME(self):
        # The library and its CMake target are versioned by major number,
        # e.g. ignition-math6.
        version_major = tools.Version(self.version).major
        lib_name = f"ignition-math{version_major}"
        self.cpp_info.names["cmake_find_package"] = lib_name
        self.cpp_info.names["cmake_find_package_multi"] = lib_name
        self.cpp_info.names["cmake_paths"] = lib_name
        self.cpp_info.components[lib_name].names["cmake_find_package"] = lib_name
        self.cpp_info.components[lib_name].names["cmake_find_package_multi"] = lib_name
        self.cpp_info.components[lib_name].names["cmake_paths"] = lib_name
        self.cpp_info.components[lib_name].libs = [lib_name]
        self.cpp_info.components[lib_name].includedirs.append(os.path.join("include", "ignition", "math"+version_major))
        self.cpp_info.components[lib_name].requires = ["swig::swig", "eigen::eigen", "doxygen::doxygen"]
        self.cpp_info.components[lib_name].builddirs = [self._module_file_rel_dir]
        self.cpp_info.components[lib_name].build_modules["cmake_find_package"] = [self._module_file_rel_path]
        self.cpp_info.components[lib_name].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
        self.cpp_info.components[lib_name].build_modules["cmake_paths"] = [self._module_file_rel_path]
        # Separate "eigen3" component mirroring upstream's ignition-math::eigen3
        # CMake component (Eigen interoperability headers).
        self.cpp_info.components["eigen3"].names["cmake_find_package"] = "eigen3"
        self.cpp_info.components["eigen3"].names["cmake_find_package_multi"] = "eigen3"
        self.cpp_info.components["eigen3"].names["cmake_paths"] = "eigen3"
        self.cpp_info.components["eigen3"].includedirs.append(os.path.join("include", "ignition", "math"+version_major))
        self.cpp_info.components["eigen3"].requires = ["eigen::eigen"]
        self.cpp_info.components["eigen3"].builddirs = [self._module_file_rel_dir]
        self.cpp_info.components["eigen3"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
        self.cpp_info.components["eigen3"].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
        self.cpp_info.components["eigen3"].build_modules["cmake_paths"] = [self._module_file_rel_path]
    def validate(self):
        if self.settings.os == "Macos" and self.settings.arch == "armv8":
            raise ConanInvalidConfiguration("sorry, M1 builds are not currently supported, give up!")
    @property
    def _module_file_rel_dir(self):
        return os.path.join("lib", "cmake")
    @property
    def _module_file_rel_path(self):
        return os.path.join(self._module_file_rel_dir, f"conan-official-{self.name}-variables.cmake")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.